##// END OF EJS Templates
Break apart hg.py...
mpm@selenic.com -
r1089:142b5d5e default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -0,0 +1,36 b''
1 """
2 node.py - basic nodeid manipulation for mercurial
3
4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8 """
9
10 import sha, binascii
11
12 nullid = "\0" * 20
13
14 def hex(node):
15 return binascii.hexlify(node)
16
17 def bin(node):
18 return binascii.unhexlify(node)
19
20 def short(node):
21 return hex(node[:6])
22
23 def hash(text, p1, p2):
24 """generate a hash from the given text and its parent hashes
25
26 This hash combines both the current file contents and its history
27 in a manner that makes it easy to distinguish nodes with the same
28 content in the revision graph.
29 """
30 l = [p1, p2]
31 l.sort()
32 s = sha.new(l[0])
33 s.update(l[1])
34 s.update(text)
35 return s.digest()
36
1 NO CONTENT: new file 100644
NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
This diff has been collapsed as it changes many lines, (2254 lines changed) Show them Hide them
@@ -9,255 +9,7 b' import sys, struct, os'
9 import util
9 import util
10 from revlog import *
10 from revlog import *
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
12 demandload(globals(), "time")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
class filelog(revlog):
    """A revlog holding the history of a single tracked file.

    Revisions may carry a metadata header delimited by '\\1\\n' markers;
    read()/readmeta() strip or parse it, add() writes it.
    """

    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        """Invert encodedir (note the reversed replacement order)."""
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the file contents of a revision, metadata stripped."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict of a revision ({} if none).

        Fixed: previously the accumulator `m` was never initialized
        (NameError on any revision with metadata) and the no-metadata
        path returned the raw text instead of a dict.
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        m = {}
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, prefixing a metadata block when needed."""
        # text starting with the marker must be wrapped even with no
        # metadata, so read() can unwrap it unambiguously
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return (linkrev, line) pairs attributing each line of node."""

        def decorate(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged from the parent keep the parent's revs
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
102
class manifest(revlog):
    """The manifest revlog: each revision maps filenames to nodeids.

    A manifest revision is text: one "file\\0<40-hex-node>[x]\\n" line per
    file, where a trailing "x" flags the file executable.  The class
    keeps one-revision caches (mapcache/listcache) and can build deltas
    incrementally in add() instead of re-diffing the full text.
    """

    def __init__(self, opener):
        self.mapcache = None    # (node, map, flag) of the last read()
        self.listcache = None   # (text, list-of-lines) of the last read/add
        self.addlist = None     # lines being assembled by add()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {filename: nodeid} for a manifest revision."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            # line format: filename NUL 40-hex-node [x] newline
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return {filename: is_executable} for a manifest revision."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            # populate mapcache as a side effect
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        """Return a delta from text a to text b, reusing the delta
        computed during add() when the caches still match."""
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # sanity-check the shortcut; fall back to a full text diff
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Add a manifest revision built from map/flags.

        When `changed` = (added-or-modified, removed) is given and the
        caches are parented on p1, the new text and its delta are built
        incrementally by patching the cached line list.
        """
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                # merge adjacent/overlapping hunks into one record
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist.  start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: rebuild the full sorted line list from scratch
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (each work item is [filename, 0=add/modify or 1=remove])
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                # the delta applies against tip, so hand it to addrevision
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            # incremental delta disagrees with the assembled text: abort
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261
13
262 class changelog(revlog):
14 class changelog(revlog):
263 def __init__(self, opener):
15 def __init__(self, opener):
@@ -290,2007 +42,3 b' class changelog(revlog):'
290 l = [hex(manifest), user, date] + list + ["", desc]
42 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
43 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
44 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening function rooted at *base*.

    http:// bases yield lazy range readers; local paths open binary
    files, creating parent directories and breaking hardlinks before
    any write.
    """
    root = base

    def o(path, mode="r"):
        if root.startswith("http://"):
            # remote repository: serve reads via HTTP range requests
            return httprangereader.httprangereader(
                os.path.join(root, urllib.quote(path)))

        full = os.path.join(root, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                s = os.stat(full)
            except OSError:
                # target missing: make sure its directory exists
                d = os.path.dirname(full)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                # break up hardlinks so the write affects only this copy
                if s.st_nlink > 1:
                    file(full + ".tmp", "wb").write(file(full, "rb").read())
                    util.rename(full + ".tmp", full)

        return file(full, mode)

    return o
620
621 class RepoError(Exception): pass
622
623 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) a repository.

        With no path, search upward from the cwd for a ".hg" directory.
        "http://" paths open a remote repository (no dirstate).
        """
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # walk up the directory tree until we find .hg
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)    # opens files under .hg
        self.wopener = opener(self.root)   # opens working-dir files
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.tagscache = None
        self.nodetagscache = None

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                # pick up per-repository configuration, if present
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
661
662 def hook(self, name, **args):
663 s = self.ui.config("hooks", name)
664 if s:
665 self.ui.note("running hook %s: %s\n" % (name, s))
666 old = {}
667 for k, v in args.items():
668 k = k.upper()
669 old[k] = os.environ.get(k, None)
670 os.environ[k] = v
671
672 r = os.system(s)
673
674 for k, v in old.items():
675 if v != None:
676 os.environ[k] = v
677 else:
678 del os.environ[k]
679
680 if r:
681 self.ui.warn("abort: %s hook failed with status %d!\n" %
682 (name, r))
683 return False
684 return True
685
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # helper takes self explicitly because it is a plain nested
            # function, not a method
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed node: record an empty id rather than fail
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # uncommitted, repo-local tags override .hgtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
753 def dev(self):
754 if self.remote: return -1
755 return os.stat(self.path).st_dev
756
757 def local(self):
758 return not self.remote
759
760 def join(self, f):
761 return os.path.join(self.path, f)
762
763 def wjoin(self, f):
764 return os.path.join(self.root, f)
765
766 def file(self, f):
767 if f[0] == '/': f = f[1:]
768 return filelog(self.opener, f)
769
770 def getcwd(self):
771 return self.dirstate.getcwd()
772
773 def wfile(self, f, mode='r'):
774 return self.wopener(f, mode)
775
776 def wread(self, filename):
777 return self.wopener(filename, 'r').read()
778
779 def wwrite(self, filename, data, fd=None):
780 if fd:
781 return fd.write(data)
782 return self.wopener(filename, 'w').write(data)
783
    def transaction(self):
        """Start a journaled transaction; on close it is renamed to
        "undo" so the last transaction can be rolled back."""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the completed journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
799
800 def recover(self):
801 lock = self.lock()
802 if os.path.exists(self.join("journal")):
803 self.ui.status("rolling back interrupted transaction\n")
804 return transaction.rollback(self.opener, self.join("journal"))
805 else:
806 self.ui.warn("no interrupted transaction available\n")
807
    def undo(self):
        """Roll back the last completed transaction and restore the
        dirstate saved with it."""
        lock = self.lock()  # hold the repo lock for the duration
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the in-memory dirstate before swapping the file,
            # then reload from the restored copy
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
818
    def lock(self, wait=1):
        """Acquire the repository lock.

        With wait true, report who holds the lock and block until it is
        released; otherwise re-raise LockHeld.
        """
        try:
            # first try a non-blocking acquisition
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
827
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit *files* with explicit parents, bypassing the normal
        dirstate-driven commit (used for imports of raw changesets).

        The dirstate is only updated when p1 is the current working-dir
        parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
897
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit changes in the working directory.

        With *files*, commit exactly those files; otherwise commit
        everything changes() reports.  Returns the new changeset node,
        or None if nothing changed, a hook refused, or the user aborted
        the editor.
        """
        # note: `commit` here shadows the method name within this scope
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an empty commit is still allowed when completing a merge
        # (p2 != nullid) or when forced
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision as file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: build a template and open the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) between two states.

        With neither node given, compare the working dir to its parent;
        with only node1, compare the working dir to that revision; with
        both, compare the two revisions' manifests.
        """
        mf2, u = None, []

        # compare a working-dir file against the revision in manifest mf
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        # manifest of a revision, restricted to files accepted by match
        def mfmatches(node):
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # empty nodeid marks "content must be compared" below
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever remains in mf1 was deleted relative to node1
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
1143 def heads(self):
1144 return self.changelog.heads()
1145
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return {head: [branch tags]} (see the comment block above)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}   # node -> {tagged ancestor node: 1}
        merges = []     # second parents still to traverse, with context
        seenmerge = {}
        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop going deeper
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent with a copy of the context
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from node (memoized)
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
1284 def newer(self, nodes):
1285 m = {}
1286 nl = []
1287 pm = {}
1288 cl = self.changelog
1289 t = l = cl.count()
1290
1291 # find the lowest numbered node
1292 for n in nodes:
1293 l = min(l, cl.rev(n))
1294 m[n] = 1
1295
1296 for i in xrange(l, t):
1297 n = cl.node(i)
1298 if n in m: # explicitly listed
1299 pm[n] = 1
1300 nl.append(n)
1301 continue
1302 for p in cl.parents(n):
1303 if p in pm: # parent listed
1304 pm[n] = 1
1305 nl.append(n)
1306 break
1307
1308 return nl
1309
1310 def findincoming(self, remote, base=None, heads=None):
1311 m = self.changelog.nodemap
1312 search = []
1313 fetch = {}
1314 seen = {}
1315 seenbranch = {}
1316 if base == None:
1317 base = {}
1318
1319 # assume we're closer to the tip than the root
1320 # and start by examining the heads
1321 self.ui.status("searching for changes\n")
1322
1323 if not heads:
1324 heads = remote.heads()
1325
1326 unknown = []
1327 for h in heads:
1328 if h not in m:
1329 unknown.append(h)
1330 else:
1331 base[h] = 1
1332
1333 if not unknown:
1334 return None
1335
1336 rep = {}
1337 reqcnt = 0
1338
1339 # search through remote branches
1340 # a 'branch' here is a linear segment of history, with four parts:
1341 # head, root, first parent, second parent
1342 # (a branch always has two parents (or none) by definition)
1343 unknown = remote.branches(unknown)
1344 while unknown:
1345 r = []
1346 while unknown:
1347 n = unknown.pop(0)
1348 if n[0] in seen:
1349 continue
1350
1351 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1352 if n[0] == nullid:
1353 break
1354 if n in seenbranch:
1355 self.ui.debug("branch already found\n")
1356 continue
1357 if n[1] and n[1] in m: # do we know the base?
1358 self.ui.debug("found incomplete branch %s:%s\n"
1359 % (short(n[0]), short(n[1])))
1360 search.append(n) # schedule branch range for scanning
1361 seenbranch[n] = 1
1362 else:
1363 if n[1] not in seen and n[1] not in fetch:
1364 if n[2] in m and n[3] in m:
1365 self.ui.debug("found new changeset %s\n" %
1366 short(n[1]))
1367 fetch[n[1]] = 1 # earliest unknown
1368 base[n[2]] = 1 # latest known
1369 continue
1370
1371 for a in n[2:4]:
1372 if a not in rep:
1373 r.append(a)
1374 rep[a] = 1
1375
1376 seen[n[0]] = 1
1377
1378 if r:
1379 reqcnt += 1
1380 self.ui.debug("request %d: %s\n" %
1381 (reqcnt, " ".join(map(short, r))))
1382 for p in range(0, len(r), 10):
1383 for b in remote.branches(r[p:p+10]):
1384 self.ui.debug("received %s:%s\n" %
1385 (short(b[0]), short(b[1])))
1386 if b[0] in m:
1387 self.ui.debug("found base node %s\n" % short(b[0]))
1388 base[b[0]] = 1
1389 elif b[0] not in seen:
1390 unknown.append(b)
1391
1392 # do binary search on the branches we found
1393 while search:
1394 n = search.pop(0)
1395 reqcnt += 1
1396 l = remote.between([(n[0], n[1])])[0]
1397 l.append(n[1])
1398 p = n[0]
1399 f = 1
1400 for i in l:
1401 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1402 if i in m:
1403 if f <= 2:
1404 self.ui.debug("found new branch changeset %s\n" %
1405 short(p))
1406 fetch[p] = 1
1407 base[i] = 1
1408 else:
1409 self.ui.debug("narrowed branch search to %s:%s\n"
1410 % (short(p), short(i)))
1411 search.append((p, i))
1412 break
1413 p, f = i, f * 2
1414
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1417 if f in m:
1418 raise RepoError("already have changeset " + short(f[:4]))
1419
1420 if base.keys() == [nullid]:
1421 self.ui.warn("warning: pulling from an unrelated repository!\n")
1422
1423 self.ui.note("found new changesets starting at " +
1424 " ".join([short(f) for f in fetch]) + "\n")
1425
1426 self.ui.debug("%d total queries\n" % reqcnt)
1427
1428 return fetch.keys()
1429
1430 def findoutgoing(self, remote, base=None, heads=None):
1431 if base == None:
1432 base = {}
1433 self.findincoming(remote, base, heads)
1434
1435 self.ui.debug("common changesets up to "
1436 + " ".join(map(short, base.keys())) + "\n")
1437
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440 # prune everything remote has from the tree
1441 del remain[nullid]
1442 remove = base.keys()
1443 while remove:
1444 n = remove.pop(0)
1445 if n in remain:
1446 del remain[n]
1447 for p in self.changelog.parents(n):
1448 remove.append(p)
1449
1450 # find every node whose parents have been pruned
1451 subset = []
1452 for n in remain:
1453 p1, p2 = self.changelog.parents(n)
1454 if p1 not in remain and p2 not in remain:
1455 subset.append(n)
1456
1457 # this is the set of all roots we have to push
1458 return subset
1459
1460 def pull(self, remote):
1461 lock = self.lock()
1462
1463 # if we have an empty repo, fetch everything
1464 if self.changelog.tip() == nullid:
1465 self.ui.status("requesting all changes\n")
1466 fetch = [nullid]
1467 else:
1468 fetch = self.findincoming(remote)
1469
1470 if not fetch:
1471 self.ui.status("no changes found\n")
1472 return 1
1473
1474 cg = remote.changegroup(fetch)
1475 return self.addchangegroup(cg)
1476
1477 def push(self, remote, force=False):
1478 lock = remote.lock()
1479
1480 base = {}
1481 heads = remote.heads()
1482 inc = self.findincoming(remote, base, heads)
1483 if not force and inc:
1484 self.ui.warn("abort: unsynced remote changes!\n")
1485 self.ui.status("(did you forget to sync? use push -f to force)\n")
1486 return 1
1487
1488 update = self.findoutgoing(remote, base)
1489 if not update:
1490 self.ui.status("no changes found\n")
1491 return 1
1492 elif not force:
1493 if len(heads) < len(self.changelog.heads()):
1494 self.ui.warn("abort: push creates new remote branches!\n")
1495 self.ui.status("(did you forget to merge?" +
1496 " use push -f to force)\n")
1497 return 1
1498
1499 cg = self.changegroup(update)
1500 return remote.addchangegroup(cg)
1501
1502 def changegroup(self, basenodes):
1503 class genread:
1504 def __init__(self, generator):
1505 self.g = generator
1506 self.buf = ""
1507 def fillbuf(self):
1508 self.buf += "".join(self.g)
1509
1510 def read(self, l):
1511 while l > len(self.buf):
1512 try:
1513 self.buf += self.g.next()
1514 except StopIteration:
1515 break
1516 d, self.buf = self.buf[:l], self.buf[l:]
1517 return d
1518
1519 def gengroup():
1520 nodes = self.newer(basenodes)
1521
1522 # construct the link map
1523 linkmap = {}
1524 for n in nodes:
1525 linkmap[self.changelog.rev(n)] = n
1526
1527 # construct a list of all changed files
1528 changed = {}
1529 for n in nodes:
1530 c = self.changelog.read(n)
1531 for f in c[3]:
1532 changed[f] = 1
1533 changed = changed.keys()
1534 changed.sort()
1535
1536 # the changegroup is changesets + manifests + all file revs
1537 revs = [ self.changelog.rev(n) for n in nodes ]
1538
1539 for y in self.changelog.group(linkmap): yield y
1540 for y in self.manifest.group(linkmap): yield y
1541 for f in changed:
1542 yield struct.pack(">l", len(f) + 4) + f
1543 g = self.file(f).group(linkmap)
1544 for y in g:
1545 yield y
1546
1547 yield struct.pack(">l", 0)
1548
1549 return genread(gengroup())
1550
1551 def addchangegroup(self, source):
1552
1553 def getchunk():
1554 d = source.read(4)
1555 if not d: return ""
1556 l = struct.unpack(">l", d)[0]
1557 if l <= 4: return ""
1558 return source.read(l - 4)
1559
1560 def getgroup():
1561 while 1:
1562 c = getchunk()
1563 if not c: break
1564 yield c
1565
1566 def csmap(x):
1567 self.ui.debug("add changeset %s\n" % short(x))
1568 return self.changelog.count()
1569
1570 def revmap(x):
1571 return self.changelog.rev(x)
1572
1573 if not source: return
1574 changesets = files = revisions = 0
1575
1576 tr = self.transaction()
1577
1578 oldheads = len(self.changelog.heads())
1579
1580 # pull off the changeset group
1581 self.ui.status("adding changesets\n")
1582 co = self.changelog.tip()
1583 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1584 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1585
1586 # pull off the manifest group
1587 self.ui.status("adding manifests\n")
1588 mm = self.manifest.tip()
1589 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1590
1591 # process the files
1592 self.ui.status("adding file changes\n")
1593 while 1:
1594 f = getchunk()
1595 if not f: break
1596 self.ui.debug("adding %s revisions\n" % f)
1597 fl = self.file(f)
1598 o = fl.count()
1599 n = fl.addgroup(getgroup(), revmap, tr)
1600 revisions += fl.count() - o
1601 files += 1
1602
1603 newheads = len(self.changelog.heads())
1604 heads = ""
1605 if oldheads and newheads > oldheads:
1606 heads = " (+%d heads)" % (newheads - oldheads)
1607
1608 self.ui.status(("added %d changesets" +
1609 " with %d changes to %d files%s\n")
1610 % (changesets, revisions, files, heads))
1611
1612 tr.close()
1613
1614 if not self.hook("changegroup"):
1615 return 1
1616
1617 return
1618
1619 def update(self, node, allow=False, force=False, choose=None,
1620 moddirstate=True):
1621 pl = self.dirstate.parents()
1622 if not force and pl[1] != nullid:
1623 self.ui.warn("aborting: outstanding uncommitted merges\n")
1624 return 1
1625
1626 p1, p2 = pl[0], node
1627 pa = self.changelog.ancestor(p1, p2)
1628 m1n = self.changelog.read(p1)[0]
1629 m2n = self.changelog.read(p2)[0]
1630 man = self.manifest.ancestor(m1n, m2n)
1631 m1 = self.manifest.read(m1n)
1632 mf1 = self.manifest.readflags(m1n)
1633 m2 = self.manifest.read(m2n)
1634 mf2 = self.manifest.readflags(m2n)
1635 ma = self.manifest.read(man)
1636 mfa = self.manifest.readflags(man)
1637
1638 (c, a, d, u) = self.changes()
1639
1640 # is this a jump, or a merge? i.e. is there a linear path
1641 # from p1 to p2?
1642 linear_path = (pa == p1 or pa == p2)
1643
1644 # resolve the manifest to determine which files
1645 # we care about merging
1646 self.ui.note("resolving manifests\n")
1647 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1648 (force, allow, moddirstate, linear_path))
1649 self.ui.debug(" ancestor %s local %s remote %s\n" %
1650 (short(man), short(m1n), short(m2n)))
1651
1652 merge = {}
1653 get = {}
1654 remove = []
1655
1656 # construct a working dir manifest
1657 mw = m1.copy()
1658 mfw = mf1.copy()
1659 umap = dict.fromkeys(u)
1660
1661 for f in a + c + u:
1662 mw[f] = ""
1663 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1664
1665 for f in d:
1666 if f in mw: del mw[f]
1667
1668 # If we're jumping between revisions (as opposed to merging),
1669 # and if neither the working directory nor the target rev has
1670 # the file, then we need to remove it from the dirstate, to
1671 # prevent the dirstate from listing the file when it is no
1672 # longer in the manifest.
1673 if moddirstate and linear_path and f not in m2:
1674 self.dirstate.forget((f,))
1675
1676 # Compare manifests
1677 for f, n in mw.iteritems():
1678 if choose and not choose(f): continue
1679 if f in m2:
1680 s = 0
1681
1682 # is the wfile new since m1, and match m2?
1683 if f not in m1:
1684 t1 = self.wread(f)
1685 t2 = self.file(f).read(m2[f])
1686 if cmp(t1, t2) == 0:
1687 n = m2[f]
1688 del t1, t2
1689
1690 # are files different?
1691 if n != m2[f]:
1692 a = ma.get(f, nullid)
1693 # are both different from the ancestor?
1694 if n != a and m2[f] != a:
1695 self.ui.debug(" %s versions differ, resolve\n" % f)
1696 # merge executable bits
1697 # "if we changed or they changed, change in merge"
1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1699 mode = ((a^b) | (a^c)) ^ a
1700 merge[f] = (m1.get(f, nullid), m2[f], mode)
1701 s = 1
1702 # are we clobbering?
1703 # is remote's version newer?
1704 # or are we going back in time?
1705 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1706 self.ui.debug(" remote %s is newer, get\n" % f)
1707 get[f] = m2[f]
1708 s = 1
1709 elif f in umap:
1710 # this unknown file is the same as the checkout
1711 get[f] = m2[f]
1712
1713 if not s and mfw[f] != mf2[f]:
1714 if force:
1715 self.ui.debug(" updating permissions for %s\n" % f)
1716 util.set_exec(self.wjoin(f), mf2[f])
1717 else:
1718 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1719 mode = ((a^b) | (a^c)) ^ a
1720 if mode != b:
1721 self.ui.debug(" updating permissions for %s\n" % f)
1722 util.set_exec(self.wjoin(f), mode)
1723 del m2[f]
1724 elif f in ma:
1725 if n != ma[f]:
1726 r = "d"
1727 if not force and (linear_path or allow):
1728 r = self.ui.prompt(
1729 (" local changed %s which remote deleted\n" % f) +
1730 "(k)eep or (d)elete?", "[kd]", "k")
1731 if r == "d":
1732 remove.append(f)
1733 else:
1734 self.ui.debug("other deleted %s\n" % f)
1735 remove.append(f) # other deleted it
1736 else:
1737 if n == m1.get(f, nullid): # same as parent
1738 if p2 == pa: # going backwards?
1739 self.ui.debug("remote deleted %s\n" % f)
1740 remove.append(f)
1741 else:
1742 self.ui.debug("local created %s, keeping\n" % f)
1743 else:
1744 self.ui.debug("working dir created %s, keeping\n" % f)
1745
1746 for f, n in m2.iteritems():
1747 if choose and not choose(f): continue
1748 if f[0] == "/": continue
1749 if f in ma and n != ma[f]:
1750 r = "k"
1751 if not force and (linear_path or allow):
1752 r = self.ui.prompt(
1753 ("remote changed %s which local deleted\n" % f) +
1754 "(k)eep or (d)elete?", "[kd]", "k")
1755 if r == "k": get[f] = n
1756 elif f not in ma:
1757 self.ui.debug("remote created %s\n" % f)
1758 get[f] = n
1759 else:
1760 if force or p2 == pa: # going backwards?
1761 self.ui.debug("local deleted %s, recreating\n" % f)
1762 get[f] = n
1763 else:
1764 self.ui.debug("local deleted %s\n" % f)
1765
1766 del mw, m1, m2, ma
1767
1768 if force:
1769 for f in merge:
1770 get[f] = merge[f][1]
1771 merge = {}
1772
1773 if linear_path or force:
1774 # we don't need to do any magic, just jump to the new rev
1775 branch_merge = False
1776 p1, p2 = p2, nullid
1777 else:
1778 if not allow:
1779 self.ui.status("this update spans a branch" +
1780 " affecting the following files:\n")
1781 fl = merge.keys() + get.keys()
1782 fl.sort()
1783 for f in fl:
1784 cf = ""
1785 if f in merge: cf = " (resolve)"
1786 self.ui.status(" %s%s\n" % (f, cf))
1787 self.ui.warn("aborting update spanning branches!\n")
1788 self.ui.status("(use update -m to merge across branches" +
1789 " or -C to lose changes)\n")
1790 return 1
1791 branch_merge = True
1792
1793 if moddirstate:
1794 self.dirstate.setparents(p1, p2)
1795
1796 # get the files we don't need to change
1797 files = get.keys()
1798 files.sort()
1799 for f in files:
1800 if f[0] == "/": continue
1801 self.ui.note("getting %s\n" % f)
1802 t = self.file(f).read(get[f])
1803 try:
1804 self.wwrite(f, t)
1805 except IOError:
1806 os.makedirs(os.path.dirname(self.wjoin(f)))
1807 self.wwrite(f, t)
1808 util.set_exec(self.wjoin(f), mf2[f])
1809 if moddirstate:
1810 if branch_merge:
1811 self.dirstate.update([f], 'n', st_mtime=-1)
1812 else:
1813 self.dirstate.update([f], 'n')
1814
1815 # merge the tricky bits
1816 files = merge.keys()
1817 files.sort()
1818 for f in files:
1819 self.ui.status("merging %s\n" % f)
1820 my, other, flag = merge[f]
1821 self.merge3(f, my, other)
1822 util.set_exec(self.wjoin(f), flag)
1823 if moddirstate:
1824 if branch_merge:
1825 # We've done a branch merge, mark this file as merged
1826 # so that we properly record the merger later
1827 self.dirstate.update([f], 'm')
1828 else:
1829 # We've update-merged a locally modified file, so
1830 # we set the dirstate to emulate a normal checkout
1831 # of that file some time in the past. Thus our
1832 # merge will appear as a normal local file
1833 # modification.
1834 f_len = len(self.file(f).read(other))
1835 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1836
1837 remove.sort()
1838 for f in remove:
1839 self.ui.note("removing %s\n" % f)
1840 try:
1841 os.unlink(self.wjoin(f))
1842 except OSError, inst:
1843 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1844 # try removing directories that might now be empty
1845 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1846 except: pass
1847 if moddirstate:
1848 if branch_merge:
1849 self.dirstate.update(remove, 'r')
1850 else:
1851 self.dirstate.forget(remove)
1852
1853 def merge3(self, fn, my, other):
1854 """perform a 3-way merge in the working directory"""
1855
1856 def temp(prefix, node):
1857 pre = "%s~%s." % (os.path.basename(fn), prefix)
1858 (fd, name) = tempfile.mkstemp("", pre)
1859 f = os.fdopen(fd, "wb")
1860 self.wwrite(fn, fl.read(node), f)
1861 f.close()
1862 return name
1863
1864 fl = self.file(fn)
1865 base = fl.ancestor(my, other)
1866 a = self.wjoin(fn)
1867 b = temp("base", base)
1868 c = temp("other", other)
1869
1870 self.ui.note("resolving %s\n" % fn)
1871 self.ui.debug("file %s: other %s ancestor %s\n" %
1872 (fn, short(other), short(base)))
1873
1874 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1875 or "hgmerge")
1876 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1877 if r:
1878 self.ui.warn("merging %s failed!\n" % fn)
1879
1880 os.unlink(b)
1881 os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
2018 class remoterepository:
2019 def local(self):
2020 return False
2021
2022 class httprepository(remoterepository):
2023 def __init__(self, ui, path):
2024 # fix missing / after hostname
2025 s = urlparse.urlsplit(path)
2026 partial = s[2]
2027 if not partial: partial = "/"
2028 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
2029 self.ui = ui
2030 no_list = [ "localhost", "127.0.0.1" ]
2031 host = ui.config("http_proxy", "host")
2032 if host is None:
2033 host = os.environ.get("http_proxy")
2034 if host and host.startswith('http://'):
2035 host = host[7:]
2036 user = ui.config("http_proxy", "user")
2037 passwd = ui.config("http_proxy", "passwd")
2038 no = ui.config("http_proxy", "no")
2039 if no is None:
2040 no = os.environ.get("no_proxy")
2041 if no:
2042 no_list = no_list + no.split(",")
2043
2044 no_proxy = 0
2045 for h in no_list:
2046 if (path.startswith("http://" + h + "/") or
2047 path.startswith("http://" + h + ":") or
2048 path == "http://" + h):
2049 no_proxy = 1
2050
2051 # Note: urllib2 takes proxy values from the environment and those will
2052 # take precedence
2053 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
2054 try:
2055 if os.environ.has_key(env):
2056 del os.environ[env]
2057 except OSError:
2058 pass
2059
2060 proxy_handler = urllib2.BaseHandler()
2061 if host and not no_proxy:
2062 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
2063
2064 authinfo = None
2065 if user and passwd:
2066 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
2067 passmgr.add_password(None, host, user, passwd)
2068 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
2069
2070 opener = urllib2.build_opener(proxy_handler, authinfo)
2071 urllib2.install_opener(opener)
2072
2073 def dev(self):
2074 return -1
2075
2076 def do_cmd(self, cmd, **args):
2077 self.ui.debug("sending %s command\n" % cmd)
2078 q = {"cmd": cmd}
2079 q.update(args)
2080 qs = urllib.urlencode(q)
2081 cu = "%s?%s" % (self.url, qs)
2082 resp = urllib2.urlopen(cu)
2083 proto = resp.headers['content-type']
2084
2085 # accept old "text/plain" and "application/hg-changegroup" for now
2086 if not proto.startswith('application/mercurial') and \
2087 not proto.startswith('text/plain') and \
2088 not proto.startswith('application/hg-changegroup'):
2089 raise RepoError("'%s' does not appear to be an hg repository"
2090 % self.url)
2091
2092 if proto.startswith('application/mercurial'):
2093 version = proto[22:]
2094 if float(version) > 0.1:
2095 raise RepoError("'%s' uses newer protocol %s" %
2096 (self.url, version))
2097
2098 return resp
2099
2100 def heads(self):
2101 d = self.do_cmd("heads").read()
2102 try:
2103 return map(bin, d[:-1].split(" "))
2104 except:
2105 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2106 raise
2107
2108 def branches(self, nodes):
2109 n = " ".join(map(hex, nodes))
2110 d = self.do_cmd("branches", nodes=n).read()
2111 try:
2112 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2113 return br
2114 except:
2115 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2116 raise
2117
2118 def between(self, pairs):
2119 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2120 d = self.do_cmd("between", pairs=n).read()
2121 try:
2122 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2123 return p
2124 except:
2125 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2126 raise
2127
2128 def changegroup(self, nodes):
2129 n = " ".join(map(hex, nodes))
2130 f = self.do_cmd("changegroup", roots=n)
2131 bytes = 0
2132
2133 class zread:
2134 def __init__(self, f):
2135 self.zd = zlib.decompressobj()
2136 self.f = f
2137 self.buf = ""
2138 def read(self, l):
2139 while l > len(self.buf):
2140 r = self.f.read(4096)
2141 if r:
2142 self.buf += self.zd.decompress(r)
2143 else:
2144 self.buf += self.zd.flush()
2145 break
2146 d, self.buf = self.buf[:l], self.buf[l:]
2147 return d
2148
2149 return zread(f)
2150
2151 class remotelock:
2152 def __init__(self, repo):
2153 self.repo = repo
2154 def release(self):
2155 self.repo.unlock()
2156 self.repo = None
2157 def __del__(self):
2158 if self.repo:
2159 self.release()
2160
2161 class sshrepository(remoterepository):
2162 def __init__(self, ui, path):
2163 self.url = path
2164 self.ui = ui
2165
2166 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
2167 if not m:
2168 raise RepoError("couldn't parse destination %s" % path)
2169
2170 self.user = m.group(2)
2171 self.host = m.group(3)
2172 self.port = m.group(5)
2173 self.path = m.group(7) or "."
2174
2175 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2176 args = self.port and ("%s -p %s") % (args, self.port) or args
2177
2178 sshcmd = self.ui.config("ui", "ssh", "ssh")
2179 remotecmd = self.ui.config("ui", "remotecmd", "hg")
2180 cmd = "%s %s '%s -R %s serve --stdio'"
2181 cmd = cmd % (sshcmd, args, remotecmd, self.path)
2182
2183 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
2184
2185 def readerr(self):
2186 while 1:
2187 r,w,x = select.select([self.pipee], [], [], 0)
2188 if not r: break
2189 l = self.pipee.readline()
2190 if not l: break
2191 self.ui.status("remote: ", l)
2192
2193 def __del__(self):
2194 try:
2195 self.pipeo.close()
2196 self.pipei.close()
2197 for l in self.pipee:
2198 self.ui.status("remote: ", l)
2199 self.pipee.close()
2200 except:
2201 pass
2202
2203 def dev(self):
2204 return -1
2205
2206 def do_cmd(self, cmd, **args):
2207 self.ui.debug("sending %s command\n" % cmd)
2208 self.pipeo.write("%s\n" % cmd)
2209 for k, v in args.items():
2210 self.pipeo.write("%s %d\n" % (k, len(v)))
2211 self.pipeo.write(v)
2212 self.pipeo.flush()
2213
2214 return self.pipei
2215
2216 def call(self, cmd, **args):
2217 r = self.do_cmd(cmd, **args)
2218 l = r.readline()
2219 self.readerr()
2220 try:
2221 l = int(l)
2222 except:
2223 raise RepoError("unexpected response '%s'" % l)
2224 return r.read(l)
2225
2226 def lock(self):
2227 self.call("lock")
2228 return remotelock(self)
2229
2230 def unlock(self):
2231 self.call("unlock")
2232
2233 def heads(self):
2234 d = self.call("heads")
2235 try:
2236 return map(bin, d[:-1].split(" "))
2237 except:
2238 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2239
2240 def branches(self, nodes):
2241 n = " ".join(map(hex, nodes))
2242 d = self.call("branches", nodes=n)
2243 try:
2244 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2245 return br
2246 except:
2247 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2248
2249 def between(self, pairs):
2250 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2251 d = self.call("between", pairs=n)
2252 try:
2253 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2254 return p
2255 except:
2256 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2257
2258 def changegroup(self, nodes):
2259 n = " ".join(map(hex, nodes))
2260 f = self.do_cmd("changegroup", roots=n)
2261 return self.pipei
2262
2263 def addchangegroup(self, cg):
2264 d = self.call("addchangegroup")
2265 if d:
2266 raise RepoError("push refused: %s", d)
2267
2268 while 1:
2269 d = cg.read(4096)
2270 if not d: break
2271 self.pipeo.write(d)
2272 self.readerr()
2273
2274 self.pipeo.flush()
2275
2276 self.readerr()
2277 l = int(self.pipei.readline())
2278 return self.pipei.read(l) != ""
2279
2280 class httpsrepository(httprepository):
2281 pass
2282
2283 def repository(ui, path=None, create=0):
2284 if path:
2285 if path.startswith("http://"):
2286 return httprepository(ui, path)
2287 if path.startswith("https://"):
2288 return httpsrepository(ui, path)
2289 if path.startswith("hg://"):
2290 return httprepository(ui, path.replace("hg://", "http://"))
2291 if path.startswith("old-http://"):
2292 return localrepository(ui, path.replace("old-http://", "http://"))
2293 if path.startswith("ssh://"):
2294 return sshrepository(ui, path)
2295
2296 return localrepository(ui, path, create)
This diff has been collapsed as it changes many lines, (2002 lines changed) Show them Hide them
@@ -1,295 +1,16 b''
1 # hg.py - repository classes for mercurial
1 """
2 #
2 dirstate.py - working directory tracking for mercurial
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3
4 #
4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5 # This software may be used and distributed according to the terms
5
6 # of the GNU General Public License, incorporated herein by reference.
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8 """
7
9
8 import sys, struct, os
10 import sys, struct, os
9 import util
10 from revlog import *
11 from revlog import *
11 from demandload import *
12 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 demandload(globals(), "time bisect stat util")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
16 class filelog(revlog):
17 def __init__(self, opener, path):
18 revlog.__init__(self, opener,
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
25 return (path
26 .replace(".hg/", ".hg.hg/")
27 .replace(".i/", ".i.hg/")
28 .replace(".d/", ".d.hg/"))
29
30 def decodedir(self, path):
31 return (path
32 .replace(".d.hg/", ".d/")
33 .replace(".i.hg/", ".i/")
34 .replace(".hg.hg/", ".hg/"))
35
36 def read(self, node):
37 t = self.revision(node)
38 if not t.startswith('\1\n'):
39 return t
40 s = t.find('\1\n', 2)
41 return t[s+2:]
42
43 def readmeta(self, node):
44 t = self.revision(node)
45 if not t.startswith('\1\n'):
46 return t
47 s = t.find('\1\n', 2)
48 mt = t[2:s]
49 for l in mt.splitlines():
50 k, v = l.split(": ", 1)
51 m[k] = v
52 return m
53
54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 if meta or text.startswith('\1\n'):
56 mt = ""
57 if meta:
58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 return self.addrevision(text, transaction, link, p1, p2)
61
62 def annotate(self, node):
63
64 def decorate(text, rev):
65 return ([rev] * len(text.splitlines()), text)
66
67 def pair(parent, child):
68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 child[0][b1:b2] = parent[0][a1:a2]
70 return child
71
72 # find all ancestors
73 needed = {node:1}
74 visit = [node]
75 while visit:
76 n = visit.pop(0)
77 for p in self.parents(n):
78 if p not in needed:
79 needed[p] = 1
80 visit.append(p)
81 else:
82 # count how many times we'll use this
83 needed[p] += 1
84
85 # sort by revision which is a topological order
86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 visit.sort()
88 hist = {}
89
90 for r,n in visit:
91 curr = decorate(self.read(n), self.linkrev(n))
92 for p in self.parents(n):
93 if p != nullid:
94 curr = pair(hist[p], curr)
95 # trim the history of unneeded revs
96 needed[p] -= 1
97 if not needed[p]:
98 del hist[p]
99 hist[n] = curr
100
101 return zip(hist[n][0], hist[n][1].splitlines(1))
102
103 class manifest(revlog):
104 def __init__(self, opener):
105 self.mapcache = None
106 self.listcache = None
107 self.addlist = None
108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109
110 def read(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if self.mapcache and self.mapcache[0] == node:
113 return self.mapcache[1]
114 text = self.revision(node)
115 map = {}
116 flag = {}
117 self.listcache = (text, text.splitlines(1))
118 for l in self.listcache[1]:
119 (f, n) = l.split('\0')
120 map[f] = bin(n[:40])
121 flag[f] = (n[40:-1] == "x")
122 self.mapcache = (node, map, flag)
123 return map
124
125 def readflags(self, node):
126 if node == nullid: return {} # don't upset local cache
127 if not self.mapcache or self.mapcache[0] != node:
128 self.read(node)
129 return self.mapcache[2]
130
131 def diff(self, a, b):
132 # this is sneaky, as we're not actually using a and b
133 if self.listcache and self.addlist and self.listcache[0] == a:
134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 if mdiff.patch(a, d) != b:
136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 return mdiff.textdiff(a, b)
138 return d
139 else:
140 return mdiff.textdiff(a, b)
141
142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 changed=None):
144 # directly generate the mdiff delta from the data collected during
145 # the bisect loop below
146 def gendelta(delta):
147 i = 0
148 result = []
149 while i < len(delta):
150 start = delta[i][2]
151 end = delta[i][3]
152 l = delta[i][4]
153 if l == None:
154 l = ""
155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 and end >= delta[i+1][2]:
157 if delta[i+1][3] > end:
158 end = delta[i+1][3]
159 if delta[i+1][4]:
160 l += delta[i+1][4]
161 i += 1
162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 i += 1
164 return result
165
166 # apply the changes collected during the bisect loop to our addlist
167 def addlistdelta(addlist, delta):
168 # apply the deltas to the addlist. start from the bottom up
169 # so changes to the offsets don't mess things up.
170 i = len(delta)
171 while i > 0:
172 i -= 1
173 start = delta[i][0]
174 end = delta[i][1]
175 if delta[i][4]:
176 addlist[start:end] = [delta[i][4]]
177 else:
178 del addlist[start:end]
179 return addlist
180
181 # calculate the byte offset of the start of each line in the
182 # manifest
183 def calcoffsets(addlist):
184 offsets = [0] * (len(addlist) + 1)
185 offset = 0
186 i = 0
187 while i < len(addlist):
188 offsets[i] = offset
189 offset += len(addlist[i])
190 i += 1
191 offsets[i] = offset
192 return offsets
193
194 # if we're using the listcache, make sure it is valid and
195 # parented by the same node we're diffing against
196 if not changed or not self.listcache or not p1 or \
197 self.mapcache[0] != p1:
198 files = map.keys()
199 files.sort()
200
201 self.addlist = ["%s\000%s%s\n" %
202 (f, hex(map[f]), flags[f] and "x" or '')
203 for f in files]
204 cachedelta = None
205 else:
206 addlist = self.listcache[1]
207
208 # find the starting offset for each line in the add list
209 offsets = calcoffsets(addlist)
210
211 # combine the changed lists into one list for sorting
212 work = [[x, 0] for x in changed[0]]
213 work[len(work):] = [[x, 1] for x in changed[1]]
214 work.sort()
215
216 delta = []
217 bs = 0
218
219 for w in work:
220 f = w[0]
221 # bs will either be the index of the item or the insert point
222 bs = bisect.bisect(addlist, f, bs)
223 if bs < len(addlist):
224 fn = addlist[bs][:addlist[bs].index('\0')]
225 else:
226 fn = None
227 if w[1] == 0:
228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 flags[f] and "x" or '')
230 else:
231 l = None
232 start = bs
233 if fn != f:
234 # item not found, insert a new one
235 end = bs
236 if w[1] == 1:
237 sys.stderr.write("failed to remove %s from manifest\n"
238 % f)
239 sys.exit(1)
240 else:
241 # item is found, replace/delete the existing line
242 end = bs + 1
243 delta.append([start, end, offsets[start], offsets[end], l])
244
245 self.addlist = addlistdelta(addlist, delta)
246 if self.mapcache[0] == self.tip():
247 cachedelta = "".join(gendelta(delta))
248 else:
249 cachedelta = None
250
251 text = "".join(self.addlist)
252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 sys.stderr.write("manifest delta failure\n")
254 sys.exit(1)
255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 self.mapcache = (n, map, flags)
257 self.listcache = (text, self.addlist)
258 self.addlist = None
259
260 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
14
294 class dirstate:
15 class dirstate:
295 def __init__(self, opener, ui, root):
16 def __init__(self, opener, ui, root):
@@ -589,1708 +310,3 b' class dirstate:'
589 else:
310 else:
590 deleted.append(fn)
311 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
312 return (lookup, modified, added, removed + deleted, unknown)
592
593 # used to avoid circular references so destructors work
594 def opener(base):
595 p = base
596 def o(path, mode="r"):
597 if p.startswith("http://"):
598 f = os.path.join(p, urllib.quote(path))
599 return httprangereader.httprangereader(f)
600
601 f = os.path.join(p, path)
602
603 mode += "b" # for that other OS
604
605 if mode[0] != "r":
606 try:
607 s = os.stat(f)
608 except OSError:
609 d = os.path.dirname(f)
610 if not os.path.isdir(d):
611 os.makedirs(d)
612 else:
613 if s.st_nlink > 1:
614 file(f + ".tmp", "wb").write(file(f, "rb").read())
615 util.rename(f+".tmp", f)
616
617 return file(f, mode)
618
619 return o
620
621 class RepoError(Exception): pass
622
623 class localrepository:
624 def __init__(self, ui, path=None, create=0):
625 self.remote = 0
626 if path and path.startswith("http://"):
627 self.remote = 1
628 self.path = path
629 else:
630 if not path:
631 p = os.getcwd()
632 while not os.path.isdir(os.path.join(p, ".hg")):
633 oldp = p
634 p = os.path.dirname(p)
635 if p == oldp: raise RepoError("no repo found")
636 path = p
637 self.path = os.path.join(path, ".hg")
638
639 if not create and not os.path.isdir(self.path):
640 raise RepoError("repository %s not found" % self.path)
641
642 self.root = os.path.abspath(path)
643 self.ui = ui
644
645 if create:
646 os.mkdir(self.path)
647 os.mkdir(self.join("data"))
648
649 self.opener = opener(self.path)
650 self.wopener = opener(self.root)
651 self.manifest = manifest(self.opener)
652 self.changelog = changelog(self.opener)
653 self.tagscache = None
654 self.nodetagscache = None
655
656 if not self.remote:
657 self.dirstate = dirstate(self.opener, ui, self.root)
658 try:
659 self.ui.readconfig(self.opener("hgrc"))
660 except IOError: pass
661
662 def hook(self, name, **args):
663 s = self.ui.config("hooks", name)
664 if s:
665 self.ui.note("running hook %s: %s\n" % (name, s))
666 old = {}
667 for k, v in args.items():
668 k = k.upper()
669 old[k] = os.environ.get(k, None)
670 os.environ[k] = v
671
672 r = os.system(s)
673
674 for k, v in old.items():
675 if v != None:
676 os.environ[k] = v
677 else:
678 del os.environ[k]
679
680 if r:
681 self.ui.warn("abort: %s hook failed with status %d!\n" %
682 (name, r))
683 return False
684 return True
685
686 def tags(self):
687 '''return a mapping of tag to node'''
688 if not self.tagscache:
689 self.tagscache = {}
690 def addtag(self, k, n):
691 try:
692 bin_n = bin(n)
693 except TypeError:
694 bin_n = ''
695 self.tagscache[k.strip()] = bin_n
696
697 try:
698 # read each head of the tags file, ending with the tip
699 # and add each tag found to the map, with "newer" ones
700 # taking precedence
701 fl = self.file(".hgtags")
702 h = fl.heads()
703 h.reverse()
704 for r in h:
705 for l in fl.read(r).splitlines():
706 if l:
707 n, k = l.split(" ", 1)
708 addtag(self, k, n)
709 except KeyError:
710 pass
711
712 try:
713 f = self.opener("localtags")
714 for l in f:
715 n, k = l.split(" ", 1)
716 addtag(self, k, n)
717 except IOError:
718 pass
719
720 self.tagscache['tip'] = self.changelog.tip()
721
722 return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
753 def dev(self):
754 if self.remote: return -1
755 return os.stat(self.path).st_dev
756
757 def local(self):
758 return not self.remote
759
760 def join(self, f):
761 return os.path.join(self.path, f)
762
763 def wjoin(self, f):
764 return os.path.join(self.root, f)
765
766 def file(self, f):
767 if f[0] == '/': f = f[1:]
768 return filelog(self.opener, f)
769
770 def getcwd(self):
771 return self.dirstate.getcwd()
772
773 def wfile(self, f, mode='r'):
774 return self.wopener(f, mode)
775
776 def wread(self, filename):
777 return self.wopener(filename, 'r').read()
778
779 def wwrite(self, filename, data, fd=None):
780 if fd:
781 return fd.write(data)
782 return self.wopener(filename, 'w').write(data)
783
784 def transaction(self):
785 # save dirstate for undo
786 try:
787 ds = self.opener("dirstate").read()
788 except IOError:
789 ds = ""
790 self.opener("journal.dirstate", "w").write(ds)
791
792 def after():
793 util.rename(self.join("journal"), self.join("undo"))
794 util.rename(self.join("journal.dirstate"),
795 self.join("undo.dirstate"))
796
797 return transaction.transaction(self.ui.warn, self.opener,
798 self.join("journal"), after)
799
800 def recover(self):
801 lock = self.lock()
802 if os.path.exists(self.join("journal")):
803 self.ui.status("rolling back interrupted transaction\n")
804 return transaction.rollback(self.opener, self.join("journal"))
805 else:
806 self.ui.warn("no interrupted transaction available\n")
807
808 def undo(self):
809 lock = self.lock()
810 if os.path.exists(self.join("undo")):
811 self.ui.status("rolling back last transaction\n")
812 transaction.rollback(self.opener, self.join("undo"))
813 self.dirstate = None
814 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
815 self.dirstate = dirstate(self.opener, self.ui, self.root)
816 else:
817 self.ui.warn("no undo information available\n")
818
819 def lock(self, wait=1):
820 try:
821 return lock.lock(self.join("lock"), 0)
822 except lock.LockHeld, inst:
823 if wait:
824 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
825 return lock.lock(self.join("lock"), wait)
826 raise inst
827
828 def rawcommit(self, files, text, user, date, p1=None, p2=None):
829 orig_parent = self.dirstate.parents()[0] or nullid
830 p1 = p1 or self.dirstate.parents()[0] or nullid
831 p2 = p2 or self.dirstate.parents()[1] or nullid
832 c1 = self.changelog.read(p1)
833 c2 = self.changelog.read(p2)
834 m1 = self.manifest.read(c1[0])
835 mf1 = self.manifest.readflags(c1[0])
836 m2 = self.manifest.read(c2[0])
837 changed = []
838
839 if orig_parent == p1:
840 update_dirstate = 1
841 else:
842 update_dirstate = 0
843
844 tr = self.transaction()
845 mm = m1.copy()
846 mfm = mf1.copy()
847 linkrev = self.changelog.count()
848 for f in files:
849 try:
850 t = self.wread(f)
851 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
852 r = self.file(f)
853 mfm[f] = tm
854
855 fp1 = m1.get(f, nullid)
856 fp2 = m2.get(f, nullid)
857
858 # is the same revision on two branches of a merge?
859 if fp2 == fp1:
860 fp2 = nullid
861
862 if fp2 != nullid:
863 # is one parent an ancestor of the other?
864 fpa = r.ancestor(fp1, fp2)
865 if fpa == fp1:
866 fp1, fp2 = fp2, nullid
867 elif fpa == fp2:
868 fp2 = nullid
869
870 # is the file unmodified from the parent?
871 if t == r.read(fp1):
872 # record the proper existing parent in manifest
873 # no need to add a revision
874 mm[f] = fp1
875 continue
876
877 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
878 changed.append(f)
879 if update_dirstate:
880 self.dirstate.update([f], "n")
881 except IOError:
882 try:
883 del mm[f]
884 del mfm[f]
885 if update_dirstate:
886 self.dirstate.forget([f])
887 except:
888 # deleted from p2?
889 pass
890
891 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
892 user = user or self.ui.username()
893 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
894 tr.close()
895 if update_dirstate:
896 self.dirstate.setparents(n, nullid)
897
898 def commit(self, files = None, text = "", user = None, date = None,
899 match = util.always, force=False):
900 commit = []
901 remove = []
902 changed = []
903
904 if files:
905 for f in files:
906 s = self.dirstate.state(f)
907 if s in 'nmai':
908 commit.append(f)
909 elif s == 'r':
910 remove.append(f)
911 else:
912 self.ui.warn("%s not tracked!\n" % f)
913 else:
914 (c, a, d, u) = self.changes(match=match)
915 commit = c + a
916 remove = d
917
918 p1, p2 = self.dirstate.parents()
919 c1 = self.changelog.read(p1)
920 c2 = self.changelog.read(p2)
921 m1 = self.manifest.read(c1[0])
922 mf1 = self.manifest.readflags(c1[0])
923 m2 = self.manifest.read(c2[0])
924
925 if not commit and not remove and not force and p2 == nullid:
926 self.ui.status("nothing changed\n")
927 return None
928
929 if not self.hook("precommit"):
930 return None
931
932 lock = self.lock()
933 tr = self.transaction()
934
935 # check in files
936 new = {}
937 linkrev = self.changelog.count()
938 commit.sort()
939 for f in commit:
940 self.ui.note(f + "\n")
941 try:
942 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
943 t = self.wread(f)
944 except IOError:
945 self.ui.warn("trouble committing %s!\n" % f)
946 raise
947
948 meta = {}
949 cp = self.dirstate.copied(f)
950 if cp:
951 meta["copy"] = cp
952 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
953 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
954
955 r = self.file(f)
956 fp1 = m1.get(f, nullid)
957 fp2 = m2.get(f, nullid)
958
959 # is the same revision on two branches of a merge?
960 if fp2 == fp1:
961 fp2 = nullid
962
963 if fp2 != nullid:
964 # is one parent an ancestor of the other?
965 fpa = r.ancestor(fp1, fp2)
966 if fpa == fp1:
967 fp1, fp2 = fp2, nullid
968 elif fpa == fp2:
969 fp2 = nullid
970
971 # is the file unmodified from the parent?
972 if not meta and t == r.read(fp1):
973 # record the proper existing parent in manifest
974 # no need to add a revision
975 new[f] = fp1
976 continue
977
978 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
979 # remember what we've added so that we can later calculate
980 # the files to pull from a set of changesets
981 changed.append(f)
982
983 # update manifest
984 m1.update(new)
985 for f in remove:
986 if f in m1:
987 del m1[f]
988 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
989 (new, remove))
990
991 # add changeset
992 new = new.keys()
993 new.sort()
994
995 if not text:
996 edittext = ""
997 if p2 != nullid:
998 edittext += "HG: branch merge\n"
999 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
1000 edittext += "".join(["HG: changed %s\n" % f for f in changed])
1001 edittext += "".join(["HG: removed %s\n" % f for f in remove])
1002 if not changed and not remove:
1003 edittext += "HG: no files changed\n"
1004 edittext = self.ui.edit(edittext)
1005 if not edittext.rstrip():
1006 return None
1007 text = edittext
1008
1009 user = user or self.ui.username()
1010 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
1011 tr.close()
1012
1013 self.dirstate.setparents(n)
1014 self.dirstate.update(new, "n")
1015 self.dirstate.forget(remove)
1016
1017 if not self.hook("commit", node=hex(n)):
1018 return None
1019 return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
1029 def changes(self, node1 = None, node2 = None, files = [],
1030 match = util.always):
1031 mf2, u = None, []
1032
1033 def fcmp(fn, mf):
1034 t1 = self.wread(fn)
1035 t2 = self.file(fn).read(mf.get(fn, nullid))
1036 return cmp(t1, t2)
1037
1038 def mfmatches(node):
1039 mf = dict(self.manifest.read(node))
1040 for fn in mf.keys():
1041 if not match(fn):
1042 del mf[fn]
1043 return mf
1044
1045 # are we comparing the working directory?
1046 if not node2:
1047 l, c, a, d, u = self.dirstate.changes(files, match)
1048
1049 # are we comparing working dir against its parent?
1050 if not node1:
1051 if l:
1052 # do a full compare of any files that might have changed
1053 change = self.changelog.read(self.dirstate.parents()[0])
1054 mf2 = mfmatches(change[0])
1055 for f in l:
1056 if fcmp(f, mf2):
1057 c.append(f)
1058
1059 for l in c, a, d, u:
1060 l.sort()
1061
1062 return (c, a, d, u)
1063
1064 # are we comparing working dir against non-tip?
1065 # generate a pseudo-manifest for the working dir
1066 if not node2:
1067 if not mf2:
1068 change = self.changelog.read(self.dirstate.parents()[0])
1069 mf2 = mfmatches(change[0])
1070 for f in a + c + l:
1071 mf2[f] = ""
1072 for f in d:
1073 if f in mf2: del mf2[f]
1074 else:
1075 change = self.changelog.read(node2)
1076 mf2 = mfmatches(change[0])
1077
1078 # flush lists from dirstate before comparing manifests
1079 c, a = [], []
1080
1081 change = self.changelog.read(node1)
1082 mf1 = mfmatches(change[0])
1083
1084 for fn in mf2:
1085 if mf1.has_key(fn):
1086 if mf1[fn] != mf2[fn]:
1087 if mf2[fn] != "" or fcmp(fn, mf1):
1088 c.append(fn)
1089 del mf1[fn]
1090 else:
1091 a.append(fn)
1092
1093 d = mf1.keys()
1094
1095 for l in c, a, d, u:
1096 l.sort()
1097
1098 return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
1143 def heads(self):
1144 return self.changelog.heads()
1145
1146 # branchlookup returns a dict giving a list of branches for
1147 # each head. A branch is defined as the tag of a node or
1148 # the branch of the node's parents. If a node has multiple
1149 # branch tags, tags are eliminated if they are visible from other
1150 # branch tags.
1151 #
1152 # So, for this graph: a->b->c->d->e
1153 # \ /
1154 # aa -----/
1155 # a has tag 2.6.12
1156 # d has tag 2.6.13
1157 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1158 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1159 # from the list.
1160 #
1161 # It is possible that more than one head will have the same branch tag.
1162 # callers need to check the result for multiple heads under the same
1163 # branch tag if that is a problem for them (ie checkout of a specific
1164 # branch).
1165 #
1166 # passing in a specific branch will limit the depth of the search
1167 # through the parents. It won't limit the branches returned in the
1168 # result though.
1169 def branchlookup(self, heads=None, branch=None):
1170 if not heads:
1171 heads = self.heads()
1172 headt = [ h for h in heads ]
1173 chlog = self.changelog
1174 branches = {}
1175 merges = []
1176 seenmerge = {}
1177
1178 # traverse the tree once for each head, recording in the branches
1179 # dict which tags are visible from this head. The branches
1180 # dict also records which tags are visible from each tag
1181 # while we traverse.
1182 while headt or merges:
1183 if merges:
1184 n, found = merges.pop()
1185 visit = [n]
1186 else:
1187 h = headt.pop()
1188 visit = [h]
1189 found = [h]
1190 seen = {}
1191 while visit:
1192 n = visit.pop()
1193 if n in seen:
1194 continue
1195 pp = chlog.parents(n)
1196 tags = self.nodetags(n)
1197 if tags:
1198 for x in tags:
1199 if x == 'tip':
1200 continue
1201 for f in found:
1202 branches.setdefault(f, {})[n] = 1
1203 branches.setdefault(n, {})[n] = 1
1204 break
1205 if n not in found:
1206 found.append(n)
1207 if branch in tags:
1208 continue
1209 seen[n] = 1
1210 if pp[1] != nullid and n not in seenmerge:
1211 merges.append((pp[1], [x for x in found]))
1212 seenmerge[n] = 1
1213 if pp[0] != nullid:
1214 visit.append(pp[0])
1215 # traverse the branches dict, eliminating branch tags from each
1216 # head that are visible from another branch tag for that head.
1217 out = {}
1218 viscache = {}
1219 for h in heads:
1220 def visible(node):
1221 if node in viscache:
1222 return viscache[node]
1223 ret = {}
1224 visit = [node]
1225 while visit:
1226 x = visit.pop()
1227 if x in viscache:
1228 ret.update(viscache[x])
1229 elif x not in ret:
1230 ret[x] = 1
1231 if x in branches:
1232 visit[len(visit):] = branches[x].keys()
1233 viscache[node] = ret
1234 return ret
1235 if h not in branches:
1236 continue
1237 # O(n^2), but somewhat limited. This only searches the
1238 # tags visible from a specific head, not all the tags in the
1239 # whole repo.
1240 for b in branches[h]:
1241 vis = False
1242 for bb in branches[h].keys():
1243 if b != bb:
1244 if b in visible(bb):
1245 vis = True
1246 break
1247 if not vis:
1248 l = out.setdefault(h, [])
1249 l[len(l):] = self.nodetags(b)
1250 return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
1284 def newer(self, nodes):
1285 m = {}
1286 nl = []
1287 pm = {}
1288 cl = self.changelog
1289 t = l = cl.count()
1290
1291 # find the lowest numbered node
1292 for n in nodes:
1293 l = min(l, cl.rev(n))
1294 m[n] = 1
1295
1296 for i in xrange(l, t):
1297 n = cl.node(i)
1298 if n in m: # explicitly listed
1299 pm[n] = 1
1300 nl.append(n)
1301 continue
1302 for p in cl.parents(n):
1303 if p in pm: # parent listed
1304 pm[n] = 1
1305 nl.append(n)
1306 break
1307
1308 return nl
1309
1310 def findincoming(self, remote, base=None, heads=None):
1311 m = self.changelog.nodemap
1312 search = []
1313 fetch = {}
1314 seen = {}
1315 seenbranch = {}
1316 if base == None:
1317 base = {}
1318
1319 # assume we're closer to the tip than the root
1320 # and start by examining the heads
1321 self.ui.status("searching for changes\n")
1322
1323 if not heads:
1324 heads = remote.heads()
1325
1326 unknown = []
1327 for h in heads:
1328 if h not in m:
1329 unknown.append(h)
1330 else:
1331 base[h] = 1
1332
1333 if not unknown:
1334 return None
1335
1336 rep = {}
1337 reqcnt = 0
1338
1339 # search through remote branches
1340 # a 'branch' here is a linear segment of history, with four parts:
1341 # head, root, first parent, second parent
1342 # (a branch always has two parents (or none) by definition)
1343 unknown = remote.branches(unknown)
1344 while unknown:
1345 r = []
1346 while unknown:
1347 n = unknown.pop(0)
1348 if n[0] in seen:
1349 continue
1350
1351 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1352 if n[0] == nullid:
1353 break
1354 if n in seenbranch:
1355 self.ui.debug("branch already found\n")
1356 continue
1357 if n[1] and n[1] in m: # do we know the base?
1358 self.ui.debug("found incomplete branch %s:%s\n"
1359 % (short(n[0]), short(n[1])))
1360 search.append(n) # schedule branch range for scanning
1361 seenbranch[n] = 1
1362 else:
1363 if n[1] not in seen and n[1] not in fetch:
1364 if n[2] in m and n[3] in m:
1365 self.ui.debug("found new changeset %s\n" %
1366 short(n[1]))
1367 fetch[n[1]] = 1 # earliest unknown
1368 base[n[2]] = 1 # latest known
1369 continue
1370
1371 for a in n[2:4]:
1372 if a not in rep:
1373 r.append(a)
1374 rep[a] = 1
1375
1376 seen[n[0]] = 1
1377
1378 if r:
1379 reqcnt += 1
1380 self.ui.debug("request %d: %s\n" %
1381 (reqcnt, " ".join(map(short, r))))
1382 for p in range(0, len(r), 10):
1383 for b in remote.branches(r[p:p+10]):
1384 self.ui.debug("received %s:%s\n" %
1385 (short(b[0]), short(b[1])))
1386 if b[0] in m:
1387 self.ui.debug("found base node %s\n" % short(b[0]))
1388 base[b[0]] = 1
1389 elif b[0] not in seen:
1390 unknown.append(b)
1391
1392 # do binary search on the branches we found
1393 while search:
1394 n = search.pop(0)
1395 reqcnt += 1
1396 l = remote.between([(n[0], n[1])])[0]
1397 l.append(n[1])
1398 p = n[0]
1399 f = 1
1400 for i in l:
1401 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1402 if i in m:
1403 if f <= 2:
1404 self.ui.debug("found new branch changeset %s\n" %
1405 short(p))
1406 fetch[p] = 1
1407 base[i] = 1
1408 else:
1409 self.ui.debug("narrowed branch search to %s:%s\n"
1410 % (short(p), short(i)))
1411 search.append((p, i))
1412 break
1413 p, f = i, f * 2
1414
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1417 if f in m:
1418 raise RepoError("already have changeset " + short(f[:4]))
1419
1420 if base.keys() == [nullid]:
1421 self.ui.warn("warning: pulling from an unrelated repository!\n")
1422
1423 self.ui.note("found new changesets starting at " +
1424 " ".join([short(f) for f in fetch]) + "\n")
1425
1426 self.ui.debug("%d total queries\n" % reqcnt)
1427
1428 return fetch.keys()
1429
1430 def findoutgoing(self, remote, base=None, heads=None):
1431 if base == None:
1432 base = {}
1433 self.findincoming(remote, base, heads)
1434
1435 self.ui.debug("common changesets up to "
1436 + " ".join(map(short, base.keys())) + "\n")
1437
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440 # prune everything remote has from the tree
1441 del remain[nullid]
1442 remove = base.keys()
1443 while remove:
1444 n = remove.pop(0)
1445 if n in remain:
1446 del remain[n]
1447 for p in self.changelog.parents(n):
1448 remove.append(p)
1449
1450 # find every node whose parents have been pruned
1451 subset = []
1452 for n in remain:
1453 p1, p2 = self.changelog.parents(n)
1454 if p1 not in remain and p2 not in remain:
1455 subset.append(n)
1456
1457 # this is the set of all roots we have to push
1458 return subset
1459
1460 def pull(self, remote):
1461 lock = self.lock()
1462
1463 # if we have an empty repo, fetch everything
1464 if self.changelog.tip() == nullid:
1465 self.ui.status("requesting all changes\n")
1466 fetch = [nullid]
1467 else:
1468 fetch = self.findincoming(remote)
1469
1470 if not fetch:
1471 self.ui.status("no changes found\n")
1472 return 1
1473
1474 cg = remote.changegroup(fetch)
1475 return self.addchangegroup(cg)
1476
1477 def push(self, remote, force=False):
1478 lock = remote.lock()
1479
1480 base = {}
1481 heads = remote.heads()
1482 inc = self.findincoming(remote, base, heads)
1483 if not force and inc:
1484 self.ui.warn("abort: unsynced remote changes!\n")
1485 self.ui.status("(did you forget to sync? use push -f to force)\n")
1486 return 1
1487
1488 update = self.findoutgoing(remote, base)
1489 if not update:
1490 self.ui.status("no changes found\n")
1491 return 1
1492 elif not force:
1493 if len(heads) < len(self.changelog.heads()):
1494 self.ui.warn("abort: push creates new remote branches!\n")
1495 self.ui.status("(did you forget to merge?" +
1496 " use push -f to force)\n")
1497 return 1
1498
1499 cg = self.changegroup(update)
1500 return remote.addchangegroup(cg)
1501
    def changegroup(self, basenodes):
        """Build a changegroup stream for all changesets newer than
        basenodes.

        Returns a file-like object (read(n) interface) producing the
        serialized stream: the changelog group, the manifest group,
        then one group per changed file -- each preceded by a
        length-prefixed filename chunk -- terminated by a zero-length
        chunk.
        """
        # adapt a generator of byte strings to the read(n) interface
        class genread:
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                # pull from the generator until l bytes are buffered
                # (or the generator is exhausted)
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map: changelog revision -> node, used
            # by the revlog group() methods to set link revisions
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            # NOTE(review): revs appears unused below -- confirm before
            # removing
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # filename chunk: 4-byte big-endian length (which
                # includes the length field itself) plus the name
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # zero-length chunk marks the end of the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1550
    def addchangegroup(self, source):
        """Consume a changegroup stream from 'source' (an object with a
        read() method) and add it to this repository.

        Chunks are expected in the order produced by changegroup():
        changelog group, manifest group, then per-file groups.  Runs
        inside a single transaction.  Returns 1 when the "changegroup"
        hook fails, None otherwise.
        """

        def getchunk():
            # each chunk is a 4-byte big-endian length (including the
            # length field itself) followed by the payload; a short or
            # zero length terminates the stream
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the zero-length terminator
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            # link new changesets to their own (future) revision number
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            # link manifest/file revisions to the changeset they came from
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # remember the head count so we can report new heads afterwards
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files: each file group is preceded by a filename chunk
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1618
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Bring the working directory to changeset 'node'.

        allow       -- permit an update that spans branches (a merge)
        force       -- override safety checks, clobbering local changes
        choose      -- optional predicate limiting which files are touched
        moddirstate -- when False, leave the dirstate untouched

        Returns 1 on refusal/abort, None on success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        # p1 = current working dir parent, p2 = update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # changed, added, deleted, unknown files in the working dir
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch from the target: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # merge the executable bit the same way as content
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                # mark f as handled; whatever remains in m2 afterwards is
                # a file only the target revision has
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # we changed it locally, remote deleted it: ask
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files remaining in m2 exist only in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                # local deleted it, remote changed it: ask
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: fetch everything instead of merging
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                # refuse to cross branches without -m/-C
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # missing parent directory -- create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a later content comparison
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1852
1853 def merge3(self, fn, my, other):
1854 """perform a 3-way merge in the working directory"""
1855
1856 def temp(prefix, node):
1857 pre = "%s~%s." % (os.path.basename(fn), prefix)
1858 (fd, name) = tempfile.mkstemp("", pre)
1859 f = os.fdopen(fd, "wb")
1860 self.wwrite(fn, fl.read(node), f)
1861 f.close()
1862 return name
1863
1864 fl = self.file(fn)
1865 base = fl.ancestor(my, other)
1866 a = self.wjoin(fn)
1867 b = temp("base", base)
1868 c = temp("other", other)
1869
1870 self.ui.note("resolving %s\n" % fn)
1871 self.ui.debug("file %s: other %s ancestor %s\n" %
1872 (fn, short(other), short(base)))
1873
1874 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1875 or "hgmerge")
1876 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1877 if r:
1878 self.ui.warn("merging %s failed!\n" % fn)
1879
1880 os.unlink(b)
1881 os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
class remoterepository:
    """Common base class for repository proxies reached over a network."""
    def local(self):
        # a remote repository never lives on the local filesystem
        return False
2021
class httprepository(remoterepository):
    """Repository proxy speaking the Mercurial HTTP protocol.

    Proxy settings come from the [http_proxy] hgrc section or the
    http_proxy/no_proxy environment variables; a urllib2 opener with
    the resulting handlers is installed globally.
    """
    def __init__(self, ui, path):
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        # hosts that must never go through the proxy
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        # does the destination match any no-proxy entry?
        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            # proxy requires authentication
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        # install globally: all urllib2.urlopen calls will use these handlers
        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # remote repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Issue a protocol command as an HTTP GET and return the
        response object; raises RepoError when the server does not look
        like an hg repository or speaks a newer protocol version."""
        self.ui.debug("sending %s command\n" % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = urllib.urlencode(q)
        cu = "%s?%s" % (self.url, qs)
        resp = urllib2.urlopen(cu)
        proto = resp.headers['content-type']

        # accept old "text/plain" and "application/hg-changegroup" for now
        if not proto.startswith('application/mercurial') and \
           not proto.startswith('text/plain') and \
           not proto.startswith('application/hg-changegroup'):
            raise RepoError("'%s' does not appear to be an hg repository"
                            % self.url)

        if proto.startswith('application/mercurial'):
            # content-type is "application/mercurial-X.Y": skip past the dash
            version = proto[22:]
            if float(version) > 0.1:
                raise RepoError("'%s' uses newer protocol %s" %
                                (self.url, version))

        return resp

    def heads(self):
        # server replies with space-separated hex node ids, newline-terminated
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        # one branch tuple per line, nodes hex-encoded
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        # pairs are sent as "hex-hex" joined by newlines
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """Request a changegroup rooted at 'nodes'; returns a file-like
        object that transparently zlib-decompresses the response."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        # NOTE(review): 'bytes' appears unused -- confirm before removing
        bytes = 0

        # streaming zlib decompressor exposing a read(n) interface
        class zread:
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                # refill the buffer until l decompressed bytes are ready
                # or the underlying stream is exhausted
                while l > len(self.buf):
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
2150
class remotelock:
    """Hold a lock on a remote repository until released.

    The lock is dropped explicitly via release(), or as a safety net
    when this object is garbage collected.
    """
    def __init__(self, repo):
        self.repo = repo

    def release(self):
        # unlock first, then drop the reference so __del__ is a no-op
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # release automatically if the caller forgot to
        if self.repo:
            self.release()
2160
class sshrepository(remoterepository):
    """Repository proxy that drives "hg serve --stdio" on a remote
    host through an ssh pipe (stdin/stdout/stderr)."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # ssh://[user@]host[:port][/path]
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7) or "."

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args

        # ssh command and remote hg binary are configurable via [ui]
        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, self.path)

        # pipeo = remote stdin, pipei = remote stdout, pipee = remote stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        # drain any stderr output currently pending, without blocking
        while 1:
            r,w,x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        try:
            self.pipeo.close()
            self.pipei.close()
            # read the error descriptor until EOF
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        # remote repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Send a command down the pipe: the command name, then
        "key length\\n" followed by the value for each argument.
        Returns the pipe to read the reply from."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run a command and read its length-prefixed response."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        # reply is space-separated hex node ids, newline-terminated
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        # the changegroup streams back on the same pipe
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Stream a changegroup to the remote; returns True when the
        remote reports a non-empty result."""
        d = self.call("addchangegroup")
        if d:
            raise RepoError("push refused: %s", d)

        # pump the changegroup down the pipe in 4k chunks, relaying
        # any stderr chatter as we go
        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2279
class httpsrepository(httprepository):
    """HTTP repository reached over SSL; the protocol is identical, so
    everything is inherited from httprepository."""
2282
def repository(ui, path=None, create=0):
    """Instantiate the repository class matching the path's scheme.

    http/https/ssh select the corresponding remote proxy; the legacy
    hg:// scheme is rewritten to http://; old-http:// is served as
    plain files and handled by localrepository.  Anything else (or no
    path at all) is a local repository, optionally created anew when
    'create' is set.
    """
    if not path:
        return localrepository(ui, path, create)

    if path.startswith("http://"):
        return httprepository(ui, path)
    if path.startswith("https://"):
        return httpsrepository(ui, path)
    if path.startswith("hg://"):
        # legacy scheme name for plain http
        return httprepository(ui, path.replace("hg://", "http://"))
    if path.startswith("old-http://"):
        # static files over http -- read directly by localrepository
        return localrepository(ui, path.replace("old-http://", "http://"))
    if path.startswith("ssh://"):
        return sshrepository(ui, path)

    return localrepository(ui, path, create)
This diff has been collapsed as it changes many lines, (2204 lines changed) Show them Hide them
@@ -1,17 +1,14 b''
1 # hg.py - repository classes for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import os
9 import util
10 from revlog import *
9 from revlog import *
11 from demandload import *
10 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
11 demandload(globals(), "bdiff")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
12
16 class filelog(revlog):
13 class filelog(revlog):
17 def __init__(self, opener, path):
14 def __init__(self, opener, path):
@@ -99,2198 +96,3 b' class filelog(revlog):'
99 hist[n] = curr
96 hist[n] = curr
100
97
101 return zip(hist[n][0], hist[n][1].splitlines(1))
98 return zip(hist[n][0], hist[n][1].splitlines(1))
102
103 class manifest(revlog):
104 def __init__(self, opener):
105 self.mapcache = None
106 self.listcache = None
107 self.addlist = None
108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109
110 def read(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if self.mapcache and self.mapcache[0] == node:
113 return self.mapcache[1]
114 text = self.revision(node)
115 map = {}
116 flag = {}
117 self.listcache = (text, text.splitlines(1))
118 for l in self.listcache[1]:
119 (f, n) = l.split('\0')
120 map[f] = bin(n[:40])
121 flag[f] = (n[40:-1] == "x")
122 self.mapcache = (node, map, flag)
123 return map
124
125 def readflags(self, node):
126 if node == nullid: return {} # don't upset local cache
127 if not self.mapcache or self.mapcache[0] != node:
128 self.read(node)
129 return self.mapcache[2]
130
131 def diff(self, a, b):
132 # this is sneaky, as we're not actually using a and b
133 if self.listcache and self.addlist and self.listcache[0] == a:
134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 if mdiff.patch(a, d) != b:
136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 return mdiff.textdiff(a, b)
138 return d
139 else:
140 return mdiff.textdiff(a, b)
141
142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 changed=None):
144 # directly generate the mdiff delta from the data collected during
145 # the bisect loop below
146 def gendelta(delta):
147 i = 0
148 result = []
149 while i < len(delta):
150 start = delta[i][2]
151 end = delta[i][3]
152 l = delta[i][4]
153 if l == None:
154 l = ""
155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 and end >= delta[i+1][2]:
157 if delta[i+1][3] > end:
158 end = delta[i+1][3]
159 if delta[i+1][4]:
160 l += delta[i+1][4]
161 i += 1
162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 i += 1
164 return result
165
166 # apply the changes collected during the bisect loop to our addlist
167 def addlistdelta(addlist, delta):
168 # apply the deltas to the addlist. start from the bottom up
169 # so changes to the offsets don't mess things up.
170 i = len(delta)
171 while i > 0:
172 i -= 1
173 start = delta[i][0]
174 end = delta[i][1]
175 if delta[i][4]:
176 addlist[start:end] = [delta[i][4]]
177 else:
178 del addlist[start:end]
179 return addlist
180
181 # calculate the byte offset of the start of each line in the
182 # manifest
183 def calcoffsets(addlist):
184 offsets = [0] * (len(addlist) + 1)
185 offset = 0
186 i = 0
187 while i < len(addlist):
188 offsets[i] = offset
189 offset += len(addlist[i])
190 i += 1
191 offsets[i] = offset
192 return offsets
193
194 # if we're using the listcache, make sure it is valid and
195 # parented by the same node we're diffing against
196 if not changed or not self.listcache or not p1 or \
197 self.mapcache[0] != p1:
198 files = map.keys()
199 files.sort()
200
201 self.addlist = ["%s\000%s%s\n" %
202 (f, hex(map[f]), flags[f] and "x" or '')
203 for f in files]
204 cachedelta = None
205 else:
206 addlist = self.listcache[1]
207
208 # find the starting offset for each line in the add list
209 offsets = calcoffsets(addlist)
210
211 # combine the changed lists into one list for sorting
212 work = [[x, 0] for x in changed[0]]
213 work[len(work):] = [[x, 1] for x in changed[1]]
214 work.sort()
215
216 delta = []
217 bs = 0
218
219 for w in work:
220 f = w[0]
221 # bs will either be the index of the item or the insert point
222 bs = bisect.bisect(addlist, f, bs)
223 if bs < len(addlist):
224 fn = addlist[bs][:addlist[bs].index('\0')]
225 else:
226 fn = None
227 if w[1] == 0:
228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 flags[f] and "x" or '')
230 else:
231 l = None
232 start = bs
233 if fn != f:
234 # item not found, insert a new one
235 end = bs
236 if w[1] == 1:
237 sys.stderr.write("failed to remove %s from manifest\n"
238 % f)
239 sys.exit(1)
240 else:
241 # item is found, replace/delete the existing line
242 end = bs + 1
243 delta.append([start, end, offsets[start], offsets[end], l])
244
245 self.addlist = addlistdelta(addlist, delta)
246 if self.mapcache[0] == self.tip():
247 cachedelta = "".join(gendelta(delta))
248 else:
249 cachedelta = None
250
251 text = "".join(self.addlist)
252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 sys.stderr.write("manifest delta failure\n")
254 sys.exit(1)
255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 self.mapcache = (n, map, flags)
257 self.listcache = (text, self.addlist)
258 self.addlist = None
259
260 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
593 # used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening function rooted at base.

    Only the path string is captured (not a repo object), keeping
    destructors free of reference cycles.
    """
    root = base

    def o(path, mode="r"):
        if root.startswith("http://"):
            # read-only remote access via byte-range HTTP requests
            url = os.path.join(root, urllib.quote(path))
            return httprangereader.httprangereader(url)

        fn = os.path.join(root, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                st = os.stat(fn)
            except OSError:
                # target missing: make sure its directory exists
                dirname = os.path.dirname(fn)
                if not os.path.isdir(dirname):
                    os.makedirs(dirname)
            else:
                # break hardlinks by copy-and-rename before writing
                if st.st_nlink > 1:
                    file(fn + ".tmp", "wb").write(file(fn, "rb").read())
                    util.rename(fn + ".tmp", fn)

        return file(fn, mode)

    return o
620
class RepoError(Exception): pass  # generic repository-level error
622
623 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        With no path, searches upward from the cwd for a .hg directory.
        http:// paths open a read-only remote repository.
        """
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    # dirname() is a fixed point only at the fs root
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)
        self.wopener = opener(self.root)
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.tagscache = None       # tag name -> node, built lazily
        self.nodetagscache = None   # node -> tag list, built lazily

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            # a missing per-repo hgrc is not an error
            try:
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
661
    def hook(self, name, **args):
        """Run the configured hook command, if any.

        Keyword args are exported to the hook as upper-cased
        environment variables.  Returns False if the hook exits
        non-zero, True otherwise (including when no hook is set).
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            # export args via the environment, remembering prior values
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            r = os.system(s)

            # restore the previous environment
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
685
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # store the node in binary; ignore unparsable entries
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass

            # local (uncommitted) tags override those from .hgtags
            try:
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' always exists and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
    def dev(self):
        # device number of the repo store; -1 for remote repositories
        # (used to compare repository identity)
        if self.remote: return -1
        return os.stat(self.path).st_dev
756
    def local(self):
        # true if the repository lives on the local filesystem
        return not self.remote
759
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
762
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
765
766 def file(self, f):
767 if f[0] == '/': f = f[1:]
768 return filelog(self.opener, f)
769
    def getcwd(self):
        # current directory relative to the repository root
        return self.dirstate.getcwd()
772
    def wfile(self, f, mode='r'):
        # open a file from the working directory
        return self.wopener(f, mode)
775
    def wread(self, filename):
        # read the contents of a working directory file
        return self.wopener(filename, 'r').read()
778
    def wwrite(self, filename, data, fd=None):
        # write data to a working directory file, reusing an already
        # open file object when the caller provides one
        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
783
    def transaction(self):
        """Start a new transaction.

        A snapshot of the dirstate is journaled alongside; on a
        successful close the journal files are renamed to "undo" so the
        transaction can be rolled back later by undo().
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the completed journal around as the undo record
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
799
800 def recover(self):
801 lock = self.lock()
802 if os.path.exists(self.join("journal")):
803 self.ui.status("rolling back interrupted transaction\n")
804 return transaction.rollback(self.opener, self.join("journal"))
805 else:
806 self.ui.warn("no interrupted transaction available\n")
807
808 def undo(self):
809 lock = self.lock()
810 if os.path.exists(self.join("undo")):
811 self.ui.status("rolling back last transaction\n")
812 transaction.rollback(self.opener, self.join("undo"))
813 self.dirstate = None
814 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
815 self.dirstate = dirstate(self.opener, self.ui, self.root)
816 else:
817 self.ui.warn("no undo information available\n")
818
    def lock(self, wait=1):
        """Acquire the repository lock.

        First tries a non-blocking acquire; if the lock is held and
        wait is true, reports the holder and retries with the given
        wait, otherwise re-raises LockHeld.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
827
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit files with explicitly supplied parents and metadata,
        bypassing the usual commit checks (used by import/debug paths).

        The dirstate is only updated when committing on top of the
        current working directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file unreadable: treat it as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
897
898 def commit(self, files = None, text = "", user = None, date = None,
899 match = util.always, force=False):
900 commit = []
901 remove = []
902 changed = []
903
904 if files:
905 for f in files:
906 s = self.dirstate.state(f)
907 if s in 'nmai':
908 commit.append(f)
909 elif s == 'r':
910 remove.append(f)
911 else:
912 self.ui.warn("%s not tracked!\n" % f)
913 else:
914 (c, a, d, u) = self.changes(match=match)
915 commit = c + a
916 remove = d
917
918 p1, p2 = self.dirstate.parents()
919 c1 = self.changelog.read(p1)
920 c2 = self.changelog.read(p2)
921 m1 = self.manifest.read(c1[0])
922 mf1 = self.manifest.readflags(c1[0])
923 m2 = self.manifest.read(c2[0])
924
925 if not commit and not remove and not force and p2 == nullid:
926 self.ui.status("nothing changed\n")
927 return None
928
929 if not self.hook("precommit"):
930 return None
931
932 lock = self.lock()
933 tr = self.transaction()
934
935 # check in files
936 new = {}
937 linkrev = self.changelog.count()
938 commit.sort()
939 for f in commit:
940 self.ui.note(f + "\n")
941 try:
942 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
943 t = self.wread(f)
944 except IOError:
945 self.ui.warn("trouble committing %s!\n" % f)
946 raise
947
948 meta = {}
949 cp = self.dirstate.copied(f)
950 if cp:
951 meta["copy"] = cp
952 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
953 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
954
955 r = self.file(f)
956 fp1 = m1.get(f, nullid)
957 fp2 = m2.get(f, nullid)
958
959 # is the same revision on two branches of a merge?
960 if fp2 == fp1:
961 fp2 = nullid
962
963 if fp2 != nullid:
964 # is one parent an ancestor of the other?
965 fpa = r.ancestor(fp1, fp2)
966 if fpa == fp1:
967 fp1, fp2 = fp2, nullid
968 elif fpa == fp2:
969 fp2 = nullid
970
971 # is the file unmodified from the parent?
972 if not meta and t == r.read(fp1):
973 # record the proper existing parent in manifest
974 # no need to add a revision
975 new[f] = fp1
976 continue
977
978 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
979 # remember what we've added so that we can later calculate
980 # the files to pull from a set of changesets
981 changed.append(f)
982
983 # update manifest
984 m1.update(new)
985 for f in remove:
986 if f in m1:
987 del m1[f]
988 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
989 (new, remove))
990
991 # add changeset
992 new = new.keys()
993 new.sort()
994
995 if not text:
996 edittext = ""
997 if p2 != nullid:
998 edittext += "HG: branch merge\n"
999 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
1000 edittext += "".join(["HG: changed %s\n" % f for f in changed])
1001 edittext += "".join(["HG: removed %s\n" % f for f in remove])
1002 if not changed and not remove:
1003 edittext += "HG: no files changed\n"
1004 edittext = self.ui.edit(edittext)
1005 if not edittext.rstrip():
1006 return None
1007 text = edittext
1008
1009 user = user or self.ui.username()
1010 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
1011 tr.close()
1012
1013 self.dirstate.setparents(n)
1014 self.dirstate.update(new, "n")
1015 self.dirstate.forget(remove)
1016
1017 if not self.hook("commit", node=hex(n)):
1018 return None
1019 return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) file lists between
        node1 and node2, either of which may be None to mean the
        working directory (node1 defaults to its first parent).
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare the working copy of fn against a manifest version
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # patch the parent manifest with the dirstate results
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" marks a working-dir file; fall back to content
                    # comparison for those
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever was not consumed from mf1 is gone in mf2
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
    def heads(self):
        # all topological head nodes of the changelog
        return self.changelog.heads()
1145
1146 # branchlookup returns a dict giving a list of branches for
1147 # each head. A branch is defined as the tag of a node or
1148 # the branch of the node's parents. If a node has multiple
1149 # branch tags, tags are eliminated if they are visible from other
1150 # branch tags.
1151 #
1152 # So, for this graph: a->b->c->d->e
1153 # \ /
1154 # aa -----/
1155 # a has tag 2.6.12
1156 # d has tag 2.6.13
1157 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1158 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1159 # from the list.
1160 #
1161 # It is possible that more than one head will have the same branch tag.
1162 # callers need to check the result for multiple heads under the same
1163 # branch tag if that is a problem for them (ie checkout of a specific
1164 # branch).
1165 #
1166 # passing in a specific branch will limit the depth of the search
1167 # through the parents. It won't limit the branches returned in the
1168 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the preceding comment block for the full description of the
        algorithm and its limitations.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}   # node -> {tagged node visible from it: 1}
        merges = []     # second parents still to be traversed
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # record this tagged node as visible from every
                    # node found so far on this walk
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop early once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent for a later walk
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tags visible from node (cached)
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
    def newer(self, nodes):
        """Return the given nodes plus all their descendants, in
        changelog revision order."""
        m = {}      # the explicitly listed nodes
        nl = []     # result list
        pm = {}     # nodes known to be in the result set
        cl = self.changelog
        t = l = cl.count()

        # find the lowest numbered node
        for n in nodes:
            l = min(l, cl.rev(n))
            m[n] = 1

        # a single forward sweep suffices because parents always have
        # lower revision numbers than their children
        for i in xrange(l, t):
            n = cl.node(i)
            if n in m: # explicitly listed
                pm[n] = 1
                nl.append(n)
                continue
            for p in cl.parents(n):
                if p in pm: # parent listed
                    pm[n] = 1
                    nl.append(n)
                    break

        return nl
1309
    def findincoming(self, remote, base=None, heads=None):
        """Return the roots of the changesets we lack from remote.

        base, if supplied as a dict, is filled in with the heads of the
        common subset.  Returns None when nothing is missing.
        """
        m = self.changelog.nodemap
        search = []         # branch ranges pending binary search
        fetch = {}          # earliest unknown nodes found so far
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status("searching for changes\n")

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue this branch's parents for the next request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch the branch requests ten at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug("found base node %s\n" % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # the gap is small enough: p is a new root
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short() already truncates its argument,
                # so the extra f[:4] slice looks unintended -- confirm
                raise RepoError("already have changeset " + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn("warning: pulling from an unrelated repository!\n")

        self.ui.note("found new changesets starting at " +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return fetch.keys()
1429
1430 def findoutgoing(self, remote, base=None, heads=None):
1431 if base == None:
1432 base = {}
1433 self.findincoming(remote, base, heads)
1434
1435 self.ui.debug("common changesets up to "
1436 + " ".join(map(short, base.keys())) + "\n")
1437
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440 # prune everything remote has from the tree
1441 del remain[nullid]
1442 remove = base.keys()
1443 while remove:
1444 n = remove.pop(0)
1445 if n in remain:
1446 del remain[n]
1447 for p in self.changelog.parents(n):
1448 remove.append(p)
1449
1450 # find every node whose parents have been pruned
1451 subset = []
1452 for n in remain:
1453 p1, p2 = self.changelog.parents(n)
1454 if p1 not in remain and p2 not in remain:
1455 subset.append(n)
1456
1457 # this is the set of all roots we have to push
1458 return subset
1459
1460 def pull(self, remote):
1461 lock = self.lock()
1462
1463 # if we have an empty repo, fetch everything
1464 if self.changelog.tip() == nullid:
1465 self.ui.status("requesting all changes\n")
1466 fetch = [nullid]
1467 else:
1468 fetch = self.findincoming(remote)
1469
1470 if not fetch:
1471 self.ui.status("no changes found\n")
1472 return 1
1473
1474 cg = remote.changegroup(fetch)
1475 return self.addchangegroup(cg)
1476
1477 def push(self, remote, force=False):
1478 lock = remote.lock()
1479
1480 base = {}
1481 heads = remote.heads()
1482 inc = self.findincoming(remote, base, heads)
1483 if not force and inc:
1484 self.ui.warn("abort: unsynced remote changes!\n")
1485 self.ui.status("(did you forget to sync? use push -f to force)\n")
1486 return 1
1487
1488 update = self.findoutgoing(remote, base)
1489 if not update:
1490 self.ui.status("no changes found\n")
1491 return 1
1492 elif not force:
1493 if len(heads) < len(self.changelog.heads()):
1494 self.ui.warn("abort: push creates new remote branches!\n")
1495 self.ui.status("(did you forget to merge?" +
1496 " use push -f to force)\n")
1497 return 1
1498
1499 cg = self.changegroup(update)
1500 return remote.addchangegroup(cg)
1501
    def changegroup(self, basenodes):
        """Return a read()-able object streaming the changegroup data
        for all changesets descending from basenodes."""
        class genread:
            # adapt a generator of strings to a file-like read(l) API
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # each file section starts with a length-prefixed name
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # a zero length terminates the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1550
    def addchangegroup(self, source):
        """Add a changegroup read from the file-like source to the repo.

        Consumes, in order: the changeset group, the manifest group,
        then one length-prefixed (name, group) section per file.
        """

        def getchunk():
            # chunks are length-prefixed; length <= 4 ends a group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # remember the head count to report new heads afterwards
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1618
1619 def update(self, node, allow=False, force=False, choose=None,
1620 moddirstate=True):
1621 pl = self.dirstate.parents()
1622 if not force and pl[1] != nullid:
1623 self.ui.warn("aborting: outstanding uncommitted merges\n")
1624 return 1
1625
1626 p1, p2 = pl[0], node
1627 pa = self.changelog.ancestor(p1, p2)
1628 m1n = self.changelog.read(p1)[0]
1629 m2n = self.changelog.read(p2)[0]
1630 man = self.manifest.ancestor(m1n, m2n)
1631 m1 = self.manifest.read(m1n)
1632 mf1 = self.manifest.readflags(m1n)
1633 m2 = self.manifest.read(m2n)
1634 mf2 = self.manifest.readflags(m2n)
1635 ma = self.manifest.read(man)
1636 mfa = self.manifest.readflags(man)
1637
1638 (c, a, d, u) = self.changes()
1639
1640 # is this a jump, or a merge? i.e. is there a linear path
1641 # from p1 to p2?
1642 linear_path = (pa == p1 or pa == p2)
1643
1644 # resolve the manifest to determine which files
1645 # we care about merging
1646 self.ui.note("resolving manifests\n")
1647 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1648 (force, allow, moddirstate, linear_path))
1649 self.ui.debug(" ancestor %s local %s remote %s\n" %
1650 (short(man), short(m1n), short(m2n)))
1651
1652 merge = {}
1653 get = {}
1654 remove = []
1655
1656 # construct a working dir manifest
1657 mw = m1.copy()
1658 mfw = mf1.copy()
1659 umap = dict.fromkeys(u)
1660
1661 for f in a + c + u:
1662 mw[f] = ""
1663 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1664
1665 for f in d:
1666 if f in mw: del mw[f]
1667
1668 # If we're jumping between revisions (as opposed to merging),
1669 # and if neither the working directory nor the target rev has
1670 # the file, then we need to remove it from the dirstate, to
1671 # prevent the dirstate from listing the file when it is no
1672 # longer in the manifest.
1673 if moddirstate and linear_path and f not in m2:
1674 self.dirstate.forget((f,))
1675
1676 # Compare manifests
1677 for f, n in mw.iteritems():
1678 if choose and not choose(f): continue
1679 if f in m2:
1680 s = 0
1681
1682 # is the wfile new since m1, and match m2?
1683 if f not in m1:
1684 t1 = self.wread(f)
1685 t2 = self.file(f).read(m2[f])
1686 if cmp(t1, t2) == 0:
1687 n = m2[f]
1688 del t1, t2
1689
1690 # are files different?
1691 if n != m2[f]:
1692 a = ma.get(f, nullid)
1693 # are both different from the ancestor?
1694 if n != a and m2[f] != a:
1695 self.ui.debug(" %s versions differ, resolve\n" % f)
1696 # merge executable bits
1697 # "if we changed or they changed, change in merge"
1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1699 mode = ((a^b) | (a^c)) ^ a
1700 merge[f] = (m1.get(f, nullid), m2[f], mode)
1701 s = 1
1702 # are we clobbering?
1703 # is remote's version newer?
1704 # or are we going back in time?
1705 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1706 self.ui.debug(" remote %s is newer, get\n" % f)
1707 get[f] = m2[f]
1708 s = 1
1709 elif f in umap:
1710 # this unknown file is the same as the checkout
1711 get[f] = m2[f]
1712
1713 if not s and mfw[f] != mf2[f]:
1714 if force:
1715 self.ui.debug(" updating permissions for %s\n" % f)
1716 util.set_exec(self.wjoin(f), mf2[f])
1717 else:
1718 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1719 mode = ((a^b) | (a^c)) ^ a
1720 if mode != b:
1721 self.ui.debug(" updating permissions for %s\n" % f)
1722 util.set_exec(self.wjoin(f), mode)
1723 del m2[f]
1724 elif f in ma:
1725 if n != ma[f]:
1726 r = "d"
1727 if not force and (linear_path or allow):
1728 r = self.ui.prompt(
1729 (" local changed %s which remote deleted\n" % f) +
1730 "(k)eep or (d)elete?", "[kd]", "k")
1731 if r == "d":
1732 remove.append(f)
1733 else:
1734 self.ui.debug("other deleted %s\n" % f)
1735 remove.append(f) # other deleted it
1736 else:
1737 if n == m1.get(f, nullid): # same as parent
1738 if p2 == pa: # going backwards?
1739 self.ui.debug("remote deleted %s\n" % f)
1740 remove.append(f)
1741 else:
1742 self.ui.debug("local created %s, keeping\n" % f)
1743 else:
1744 self.ui.debug("working dir created %s, keeping\n" % f)
1745
1746 for f, n in m2.iteritems():
1747 if choose and not choose(f): continue
1748 if f[0] == "/": continue
1749 if f in ma and n != ma[f]:
1750 r = "k"
1751 if not force and (linear_path or allow):
1752 r = self.ui.prompt(
1753 ("remote changed %s which local deleted\n" % f) +
1754 "(k)eep or (d)elete?", "[kd]", "k")
1755 if r == "k": get[f] = n
1756 elif f not in ma:
1757 self.ui.debug("remote created %s\n" % f)
1758 get[f] = n
1759 else:
1760 if force or p2 == pa: # going backwards?
1761 self.ui.debug("local deleted %s, recreating\n" % f)
1762 get[f] = n
1763 else:
1764 self.ui.debug("local deleted %s\n" % f)
1765
1766 del mw, m1, m2, ma
1767
1768 if force:
1769 for f in merge:
1770 get[f] = merge[f][1]
1771 merge = {}
1772
1773 if linear_path or force:
1774 # we don't need to do any magic, just jump to the new rev
1775 branch_merge = False
1776 p1, p2 = p2, nullid
1777 else:
1778 if not allow:
1779 self.ui.status("this update spans a branch" +
1780 " affecting the following files:\n")
1781 fl = merge.keys() + get.keys()
1782 fl.sort()
1783 for f in fl:
1784 cf = ""
1785 if f in merge: cf = " (resolve)"
1786 self.ui.status(" %s%s\n" % (f, cf))
1787 self.ui.warn("aborting update spanning branches!\n")
1788 self.ui.status("(use update -m to merge across branches" +
1789 " or -C to lose changes)\n")
1790 return 1
1791 branch_merge = True
1792
1793 if moddirstate:
1794 self.dirstate.setparents(p1, p2)
1795
1796 # get the files we don't need to change
1797 files = get.keys()
1798 files.sort()
1799 for f in files:
1800 if f[0] == "/": continue
1801 self.ui.note("getting %s\n" % f)
1802 t = self.file(f).read(get[f])
1803 try:
1804 self.wwrite(f, t)
1805 except IOError:
1806 os.makedirs(os.path.dirname(self.wjoin(f)))
1807 self.wwrite(f, t)
1808 util.set_exec(self.wjoin(f), mf2[f])
1809 if moddirstate:
1810 if branch_merge:
1811 self.dirstate.update([f], 'n', st_mtime=-1)
1812 else:
1813 self.dirstate.update([f], 'n')
1814
1815 # merge the tricky bits
1816 files = merge.keys()
1817 files.sort()
1818 for f in files:
1819 self.ui.status("merging %s\n" % f)
1820 my, other, flag = merge[f]
1821 self.merge3(f, my, other)
1822 util.set_exec(self.wjoin(f), flag)
1823 if moddirstate:
1824 if branch_merge:
1825 # We've done a branch merge, mark this file as merged
1826 # so that we properly record the merger later
1827 self.dirstate.update([f], 'm')
1828 else:
1829 # We've update-merged a locally modified file, so
1830 # we set the dirstate to emulate a normal checkout
1831 # of that file some time in the past. Thus our
1832 # merge will appear as a normal local file
1833 # modification.
1834 f_len = len(self.file(f).read(other))
1835 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1836
1837 remove.sort()
1838 for f in remove:
1839 self.ui.note("removing %s\n" % f)
1840 try:
1841 os.unlink(self.wjoin(f))
1842 except OSError, inst:
1843 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1844 # try removing directories that might now be empty
1845 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1846 except: pass
1847 if moddirstate:
1848 if branch_merge:
1849 self.dirstate.update(remove, 'r')
1850 else:
1851 self.dirstate.forget(remove)
1852
1853 def merge3(self, fn, my, other):
1854 """perform a 3-way merge in the working directory"""
1855
1856 def temp(prefix, node):
1857 pre = "%s~%s." % (os.path.basename(fn), prefix)
1858 (fd, name) = tempfile.mkstemp("", pre)
1859 f = os.fdopen(fd, "wb")
1860 self.wwrite(fn, fl.read(node), f)
1861 f.close()
1862 return name
1863
1864 fl = self.file(fn)
1865 base = fl.ancestor(my, other)
1866 a = self.wjoin(fn)
1867 b = temp("base", base)
1868 c = temp("other", other)
1869
1870 self.ui.note("resolving %s\n" % fn)
1871 self.ui.debug("file %s: other %s ancestor %s\n" %
1872 (fn, short(other), short(base)))
1873
1874 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1875 or "hgmerge")
1876 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1877 if r:
1878 self.ui.warn("merging %s failed!\n" % fn)
1879
1880 os.unlink(b)
1881 os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
2018 class remoterepository:
2019 def local(self):
2020 return False
2021
2022 class httprepository(remoterepository):
2023 def __init__(self, ui, path):
2024 # fix missing / after hostname
2025 s = urlparse.urlsplit(path)
2026 partial = s[2]
2027 if not partial: partial = "/"
2028 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
2029 self.ui = ui
2030 no_list = [ "localhost", "127.0.0.1" ]
2031 host = ui.config("http_proxy", "host")
2032 if host is None:
2033 host = os.environ.get("http_proxy")
2034 if host and host.startswith('http://'):
2035 host = host[7:]
2036 user = ui.config("http_proxy", "user")
2037 passwd = ui.config("http_proxy", "passwd")
2038 no = ui.config("http_proxy", "no")
2039 if no is None:
2040 no = os.environ.get("no_proxy")
2041 if no:
2042 no_list = no_list + no.split(",")
2043
2044 no_proxy = 0
2045 for h in no_list:
2046 if (path.startswith("http://" + h + "/") or
2047 path.startswith("http://" + h + ":") or
2048 path == "http://" + h):
2049 no_proxy = 1
2050
2051 # Note: urllib2 takes proxy values from the environment and those will
2052 # take precedence
2053 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
2054 try:
2055 if os.environ.has_key(env):
2056 del os.environ[env]
2057 except OSError:
2058 pass
2059
2060 proxy_handler = urllib2.BaseHandler()
2061 if host and not no_proxy:
2062 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
2063
2064 authinfo = None
2065 if user and passwd:
2066 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
2067 passmgr.add_password(None, host, user, passwd)
2068 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
2069
2070 opener = urllib2.build_opener(proxy_handler, authinfo)
2071 urllib2.install_opener(opener)
2072
2073 def dev(self):
2074 return -1
2075
2076 def do_cmd(self, cmd, **args):
2077 self.ui.debug("sending %s command\n" % cmd)
2078 q = {"cmd": cmd}
2079 q.update(args)
2080 qs = urllib.urlencode(q)
2081 cu = "%s?%s" % (self.url, qs)
2082 resp = urllib2.urlopen(cu)
2083 proto = resp.headers['content-type']
2084
2085 # accept old "text/plain" and "application/hg-changegroup" for now
2086 if not proto.startswith('application/mercurial') and \
2087 not proto.startswith('text/plain') and \
2088 not proto.startswith('application/hg-changegroup'):
2089 raise RepoError("'%s' does not appear to be an hg repository"
2090 % self.url)
2091
2092 if proto.startswith('application/mercurial'):
2093 version = proto[22:]
2094 if float(version) > 0.1:
2095 raise RepoError("'%s' uses newer protocol %s" %
2096 (self.url, version))
2097
2098 return resp
2099
2100 def heads(self):
2101 d = self.do_cmd("heads").read()
2102 try:
2103 return map(bin, d[:-1].split(" "))
2104 except:
2105 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2106 raise
2107
2108 def branches(self, nodes):
2109 n = " ".join(map(hex, nodes))
2110 d = self.do_cmd("branches", nodes=n).read()
2111 try:
2112 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2113 return br
2114 except:
2115 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2116 raise
2117
2118 def between(self, pairs):
2119 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2120 d = self.do_cmd("between", pairs=n).read()
2121 try:
2122 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2123 return p
2124 except:
2125 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2126 raise
2127
2128 def changegroup(self, nodes):
2129 n = " ".join(map(hex, nodes))
2130 f = self.do_cmd("changegroup", roots=n)
2131 bytes = 0
2132
2133 class zread:
2134 def __init__(self, f):
2135 self.zd = zlib.decompressobj()
2136 self.f = f
2137 self.buf = ""
2138 def read(self, l):
2139 while l > len(self.buf):
2140 r = self.f.read(4096)
2141 if r:
2142 self.buf += self.zd.decompress(r)
2143 else:
2144 self.buf += self.zd.flush()
2145 break
2146 d, self.buf = self.buf[:l], self.buf[l:]
2147 return d
2148
2149 return zread(f)
2150
2151 class remotelock:
2152 def __init__(self, repo):
2153 self.repo = repo
2154 def release(self):
2155 self.repo.unlock()
2156 self.repo = None
2157 def __del__(self):
2158 if self.repo:
2159 self.release()
2160
2161 class sshrepository(remoterepository):
2162 def __init__(self, ui, path):
2163 self.url = path
2164 self.ui = ui
2165
2166 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
2167 if not m:
2168 raise RepoError("couldn't parse destination %s" % path)
2169
2170 self.user = m.group(2)
2171 self.host = m.group(3)
2172 self.port = m.group(5)
2173 self.path = m.group(7) or "."
2174
2175 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2176 args = self.port and ("%s -p %s") % (args, self.port) or args
2177
2178 sshcmd = self.ui.config("ui", "ssh", "ssh")
2179 remotecmd = self.ui.config("ui", "remotecmd", "hg")
2180 cmd = "%s %s '%s -R %s serve --stdio'"
2181 cmd = cmd % (sshcmd, args, remotecmd, self.path)
2182
2183 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
2184
2185 def readerr(self):
2186 while 1:
2187 r,w,x = select.select([self.pipee], [], [], 0)
2188 if not r: break
2189 l = self.pipee.readline()
2190 if not l: break
2191 self.ui.status("remote: ", l)
2192
2193 def __del__(self):
2194 try:
2195 self.pipeo.close()
2196 self.pipei.close()
2197 for l in self.pipee:
2198 self.ui.status("remote: ", l)
2199 self.pipee.close()
2200 except:
2201 pass
2202
2203 def dev(self):
2204 return -1
2205
2206 def do_cmd(self, cmd, **args):
2207 self.ui.debug("sending %s command\n" % cmd)
2208 self.pipeo.write("%s\n" % cmd)
2209 for k, v in args.items():
2210 self.pipeo.write("%s %d\n" % (k, len(v)))
2211 self.pipeo.write(v)
2212 self.pipeo.flush()
2213
2214 return self.pipei
2215
2216 def call(self, cmd, **args):
2217 r = self.do_cmd(cmd, **args)
2218 l = r.readline()
2219 self.readerr()
2220 try:
2221 l = int(l)
2222 except:
2223 raise RepoError("unexpected response '%s'" % l)
2224 return r.read(l)
2225
2226 def lock(self):
2227 self.call("lock")
2228 return remotelock(self)
2229
2230 def unlock(self):
2231 self.call("unlock")
2232
2233 def heads(self):
2234 d = self.call("heads")
2235 try:
2236 return map(bin, d[:-1].split(" "))
2237 except:
2238 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2239
2240 def branches(self, nodes):
2241 n = " ".join(map(hex, nodes))
2242 d = self.call("branches", nodes=n)
2243 try:
2244 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2245 return br
2246 except:
2247 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2248
2249 def between(self, pairs):
2250 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2251 d = self.call("between", pairs=n)
2252 try:
2253 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2254 return p
2255 except:
2256 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2257
2258 def changegroup(self, nodes):
2259 n = " ".join(map(hex, nodes))
2260 f = self.do_cmd("changegroup", roots=n)
2261 return self.pipei
2262
2263 def addchangegroup(self, cg):
2264 d = self.call("addchangegroup")
2265 if d:
2266 raise RepoError("push refused: %s", d)
2267
2268 while 1:
2269 d = cg.read(4096)
2270 if not d: break
2271 self.pipeo.write(d)
2272 self.readerr()
2273
2274 self.pipeo.flush()
2275
2276 self.readerr()
2277 l = int(self.pipei.readline())
2278 return self.pipei.read(l) != ""
2279
2280 class httpsrepository(httprepository):
2281 pass
2282
2283 def repository(ui, path=None, create=0):
2284 if path:
2285 if path.startswith("http://"):
2286 return httprepository(ui, path)
2287 if path.startswith("https://"):
2288 return httpsrepository(ui, path)
2289 if path.startswith("hg://"):
2290 return httprepository(ui, path.replace("hg://", "http://"))
2291 if path.startswith("old-http://"):
2292 return localrepository(ui, path.replace("old-http://", "http://"))
2293 if path.startswith("ssh://"):
2294 return sshrepository(ui, path)
2295
2296 return localrepository(ui, path, create)
This diff has been collapsed as it changes many lines, (2261 lines changed) Show them Hide them
@@ -5,590 +5,13 b''
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import os
9 import util
9 import util
10 from node import *
10 from revlog import *
11 from revlog import *
12 from repo import *
11 from demandload import *
13 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
14 demandload(globals(), "localrepo httprepo sshrepo")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
16 class filelog(revlog):
17 def __init__(self, opener, path):
18 revlog.__init__(self, opener,
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
25 return (path
26 .replace(".hg/", ".hg.hg/")
27 .replace(".i/", ".i.hg/")
28 .replace(".d/", ".d.hg/"))
29
30 def decodedir(self, path):
31 return (path
32 .replace(".d.hg/", ".d/")
33 .replace(".i.hg/", ".i/")
34 .replace(".hg.hg/", ".hg/"))
35
36 def read(self, node):
37 t = self.revision(node)
38 if not t.startswith('\1\n'):
39 return t
40 s = t.find('\1\n', 2)
41 return t[s+2:]
42
43 def readmeta(self, node):
44 t = self.revision(node)
45 if not t.startswith('\1\n'):
46 return t
47 s = t.find('\1\n', 2)
48 mt = t[2:s]
49 for l in mt.splitlines():
50 k, v = l.split(": ", 1)
51 m[k] = v
52 return m
53
54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 if meta or text.startswith('\1\n'):
56 mt = ""
57 if meta:
58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 return self.addrevision(text, transaction, link, p1, p2)
61
62 def annotate(self, node):
63
64 def decorate(text, rev):
65 return ([rev] * len(text.splitlines()), text)
66
67 def pair(parent, child):
68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 child[0][b1:b2] = parent[0][a1:a2]
70 return child
71
72 # find all ancestors
73 needed = {node:1}
74 visit = [node]
75 while visit:
76 n = visit.pop(0)
77 for p in self.parents(n):
78 if p not in needed:
79 needed[p] = 1
80 visit.append(p)
81 else:
82 # count how many times we'll use this
83 needed[p] += 1
84
85 # sort by revision which is a topological order
86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 visit.sort()
88 hist = {}
89
90 for r,n in visit:
91 curr = decorate(self.read(n), self.linkrev(n))
92 for p in self.parents(n):
93 if p != nullid:
94 curr = pair(hist[p], curr)
95 # trim the history of unneeded revs
96 needed[p] -= 1
97 if not needed[p]:
98 del hist[p]
99 hist[n] = curr
100
101 return zip(hist[n][0], hist[n][1].splitlines(1))
102
103 class manifest(revlog):
104 def __init__(self, opener):
105 self.mapcache = None
106 self.listcache = None
107 self.addlist = None
108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109
110 def read(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if self.mapcache and self.mapcache[0] == node:
113 return self.mapcache[1]
114 text = self.revision(node)
115 map = {}
116 flag = {}
117 self.listcache = (text, text.splitlines(1))
118 for l in self.listcache[1]:
119 (f, n) = l.split('\0')
120 map[f] = bin(n[:40])
121 flag[f] = (n[40:-1] == "x")
122 self.mapcache = (node, map, flag)
123 return map
124
125 def readflags(self, node):
126 if node == nullid: return {} # don't upset local cache
127 if not self.mapcache or self.mapcache[0] != node:
128 self.read(node)
129 return self.mapcache[2]
130
131 def diff(self, a, b):
132 # this is sneaky, as we're not actually using a and b
133 if self.listcache and self.addlist and self.listcache[0] == a:
134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 if mdiff.patch(a, d) != b:
136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 return mdiff.textdiff(a, b)
138 return d
139 else:
140 return mdiff.textdiff(a, b)
141
142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 changed=None):
144 # directly generate the mdiff delta from the data collected during
145 # the bisect loop below
146 def gendelta(delta):
147 i = 0
148 result = []
149 while i < len(delta):
150 start = delta[i][2]
151 end = delta[i][3]
152 l = delta[i][4]
153 if l == None:
154 l = ""
155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 and end >= delta[i+1][2]:
157 if delta[i+1][3] > end:
158 end = delta[i+1][3]
159 if delta[i+1][4]:
160 l += delta[i+1][4]
161 i += 1
162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 i += 1
164 return result
165
166 # apply the changes collected during the bisect loop to our addlist
167 def addlistdelta(addlist, delta):
168 # apply the deltas to the addlist. start from the bottom up
169 # so changes to the offsets don't mess things up.
170 i = len(delta)
171 while i > 0:
172 i -= 1
173 start = delta[i][0]
174 end = delta[i][1]
175 if delta[i][4]:
176 addlist[start:end] = [delta[i][4]]
177 else:
178 del addlist[start:end]
179 return addlist
180
181 # calculate the byte offset of the start of each line in the
182 # manifest
183 def calcoffsets(addlist):
184 offsets = [0] * (len(addlist) + 1)
185 offset = 0
186 i = 0
187 while i < len(addlist):
188 offsets[i] = offset
189 offset += len(addlist[i])
190 i += 1
191 offsets[i] = offset
192 return offsets
193
194 # if we're using the listcache, make sure it is valid and
195 # parented by the same node we're diffing against
196 if not changed or not self.listcache or not p1 or \
197 self.mapcache[0] != p1:
198 files = map.keys()
199 files.sort()
200
201 self.addlist = ["%s\000%s%s\n" %
202 (f, hex(map[f]), flags[f] and "x" or '')
203 for f in files]
204 cachedelta = None
205 else:
206 addlist = self.listcache[1]
207
208 # find the starting offset for each line in the add list
209 offsets = calcoffsets(addlist)
210
211 # combine the changed lists into one list for sorting
212 work = [[x, 0] for x in changed[0]]
213 work[len(work):] = [[x, 1] for x in changed[1]]
214 work.sort()
215
216 delta = []
217 bs = 0
218
219 for w in work:
220 f = w[0]
221 # bs will either be the index of the item or the insert point
222 bs = bisect.bisect(addlist, f, bs)
223 if bs < len(addlist):
224 fn = addlist[bs][:addlist[bs].index('\0')]
225 else:
226 fn = None
227 if w[1] == 0:
228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 flags[f] and "x" or '')
230 else:
231 l = None
232 start = bs
233 if fn != f:
234 # item not found, insert a new one
235 end = bs
236 if w[1] == 1:
237 sys.stderr.write("failed to remove %s from manifest\n"
238 % f)
239 sys.exit(1)
240 else:
241 # item is found, replace/delete the existing line
242 end = bs + 1
243 delta.append([start, end, offsets[start], offsets[end], l])
244
245 self.addlist = addlistdelta(addlist, delta)
246 if self.mapcache[0] == self.tip():
247 cachedelta = "".join(gendelta(delta))
248 else:
249 cachedelta = None
250
251 text = "".join(self.addlist)
252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 sys.stderr.write("manifest delta failure\n")
254 sys.exit(1)
255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 self.mapcache = (n, map, flags)
257 self.listcache = (text, self.addlist)
258 self.addlist = None
259
260 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
15
593 # used to avoid circular references so destructors work
16 # used to avoid circular references so destructors work
594 def opener(base):
17 def opener(base):
@@ -618,1679 +41,19 b' def opener(base):'
618
41
619 return o
42 return o
620
43
class RepoError(Exception):
    """Raised for repository-level failures (missing repo, unknown revision)."""
    pass
622
623 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) a repository.

        path may be an http:// URL (marked remote) or a local directory;
        with no path, walk upward from the cwd looking for a ".hg"
        directory.  Raises RepoError when no repository can be found or
        the named one does not exist.
        """
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # search upward from the cwd for the enclosing repo
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

            if not create and not os.path.isdir(self.path):
                raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)    # opens files under .hg
        self.wopener = opener(self.root)   # opens working-dir files
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        # lazily-filled caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
661
    def hook(self, name, **args):
        """Run the configured [hooks] command *name*, if any.

        Keyword args are exported as upper-cased environment variables
        for the hook's duration and restored afterwards.  Returns False
        when the hook exits non-zero, True otherwise (including when no
        hook is configured).
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)  # remember prior value (None = unset)
                os.environ[k] = v

            r = os.system(s)

            # restore the environment exactly as it was
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
685
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # tolerate malformed (non-hex) nodes in tag files
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            # local (uncommitted) tags override committed ones
            try:
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
753 def dev(self):
754 if self.remote: return -1
755 return os.stat(self.path).st_dev
756
    def local(self):
        """True if this repository lives on the local filesystem."""
        return not self.remote
759
    def join(self, f):
        """Return the path of *f* inside the repository's .hg directory."""
        return os.path.join(self.path, f)
762
    def wjoin(self, f):
        """Return the path of *f* inside the working directory."""
        return os.path.join(self.root, f)
765
    def file(self, f):
        """Return the filelog for tracked file *f* (a leading '/' is stripped)."""
        if f[0] == '/': f = f[1:]
        return filelog(self.opener, f)
769
    def getcwd(self):
        """Current working directory, relative to the repository root."""
        return self.dirstate.getcwd()
772
    def wfile(self, f, mode='r'):
        """Open working-directory file *f* and return the file object."""
        return self.wopener(f, mode)
775
776 def wread(self, filename):
777 return self.wopener(filename, 'r').read()
778
779 def wwrite(self, filename, data, fd=None):
780 if fd:
781 return fd.write(data)
782 return self.wopener(filename, 'w').write(data)
783
    def transaction(self):
        """Start a new transaction on the repository store.

        A snapshot of the dirstate is journalled alongside the store
        journal; on successful close, both are renamed to undo files so
        undo() can roll the transaction back later.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on commit, keep the journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
799
800 def recover(self):
801 lock = self.lock()
802 if os.path.exists(self.join("journal")):
803 self.ui.status("rolling back interrupted transaction\n")
804 return transaction.rollback(self.opener, self.join("journal"))
805 else:
806 self.ui.warn("no interrupted transaction available\n")
807
808 def undo(self):
809 lock = self.lock()
810 if os.path.exists(self.join("undo")):
811 self.ui.status("rolling back last transaction\n")
812 transaction.rollback(self.opener, self.join("undo"))
813 self.dirstate = None
814 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
815 self.dirstate = dirstate(self.opener, self.ui, self.root)
816 else:
817 self.ui.warn("no undo information available\n")
818
819 def lock(self, wait=1):
820 try:
821 return lock.lock(self.join("lock"), 0)
822 except lock.LockHeld, inst:
823 if wait:
824 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
825 return lock.lock(self.join("lock"), wait)
826 raise inst
827
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit *files* with explicit parents, bypassing dirstate checks.

        Used by import-style operations; the dirstate is only updated when
        p1 matches the current working-directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
897
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit pending changes as a new changeset.

        With *files*, commit only those; otherwise commit everything the
        dirstate reports changed under *match*.  Returns the new
        changeset node, or None when there is nothing to commit, a hook
        vetoes the commit, or the edited message is empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicit file list by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) must be committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            # record copy/rename metadata from the dirstate
            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: build a template and invoke the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Report changes between two revisions or against the working dir.

        Returns sorted lists (changed, added, deleted, unknown); 'unknown'
        is only populated when the working directory is involved.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir contents against the manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of *node*, restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # working-dir entries get an empty node to force fcmp later
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever remains in mf1 was deleted relative to node1
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
    def heads(self):
        """Return the list of changelog head nodes."""
        return self.changelog.heads()
1145
1146 # branchlookup returns a dict giving a list of branches for
1147 # each head. A branch is defined as the tag of a node or
1148 # the branch of the node's parents. If a node has multiple
1149 # branch tags, tags are eliminated if they are visible from other
1150 # branch tags.
1151 #
1152 # So, for this graph: a->b->c->d->e
1153 # \ /
1154 # aa -----/
1155 # a has tag 2.6.12
1156 # d has tag 2.6.13
1157 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1158 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1159 # from the list.
1160 #
1161 # It is possible that more than one head will have the same branch tag.
1162 # callers need to check the result for multiple heads under the same
1163 # branch tag if that is a problem for them (ie checkout of a specific
1164 # branch).
1165 #
1166 # passing in a specific branch will limit the depth of the search
1167 # through the parents. It won't limit the branches returned in the
1168 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the branch tags visible from it.

        See the comment block above for the full algorithm description:
        a branch tag is eliminated when it is reachable from another
        branch tag of the same head.  *branch* limits the depth of the
        parent traversal but not the returned tags.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume from a previously queued second parent
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue merge second parents for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of branch-tag nodes reachable from *node*, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
1284 def newer(self, nodes):
1285 m = {}
1286 nl = []
1287 pm = {}
1288 cl = self.changelog
1289 t = l = cl.count()
1290
1291 # find the lowest numbered node
1292 for n in nodes:
1293 l = min(l, cl.rev(n))
1294 m[n] = 1
1295
1296 for i in xrange(l, t):
1297 n = cl.node(i)
1298 if n in m: # explicitly listed
1299 pm[n] = 1
1300 nl.append(n)
1301 continue
1302 for p in cl.parents(n):
1303 if p in pm: # parent listed
1304 pm[n] = 1
1305 nl.append(n)
1306 break
1307
1308 return nl
1309
    def findincoming(self, remote, base=None, heads=None):
        """Discover changesets present in *remote* but missing locally.

        Fills *base* (if given) with known-common nodes as a side effect
        and returns the list of earliest unknown nodes to fetch, or None
        when the repositories are already in sync.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status("searching for changes\n")

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unseen parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch branch queries ten at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug("found base node %s\n" % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise RepoError("already have changeset " + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn("warning: pulling from an unrelated repository!\n")

        self.ui.note("found new changesets starting at " +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return fetch.keys()
1429
1430 def findoutgoing(self, remote, base=None, heads=None):
1431 if base == None:
1432 base = {}
1433 self.findincoming(remote, base, heads)
1434
1435 self.ui.debug("common changesets up to "
1436 + " ".join(map(short, base.keys())) + "\n")
1437
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440 # prune everything remote has from the tree
1441 del remain[nullid]
1442 remove = base.keys()
1443 while remove:
1444 n = remove.pop(0)
1445 if n in remain:
1446 del remain[n]
1447 for p in self.changelog.parents(n):
1448 remove.append(p)
1449
1450 # find every node whose parents have been pruned
1451 subset = []
1452 for n in remain:
1453 p1, p2 = self.changelog.parents(n)
1454 if p1 not in remain and p2 not in remain:
1455 subset.append(n)
1456
1457 # this is the set of all roots we have to push
1458 return subset
1459
1460 def pull(self, remote):
1461 lock = self.lock()
1462
1463 # if we have an empty repo, fetch everything
1464 if self.changelog.tip() == nullid:
1465 self.ui.status("requesting all changes\n")
1466 fetch = [nullid]
1467 else:
1468 fetch = self.findincoming(remote)
1469
1470 if not fetch:
1471 self.ui.status("no changes found\n")
1472 return 1
1473
1474 cg = remote.changegroup(fetch)
1475 return self.addchangegroup(cg)
1476
1477 def push(self, remote, force=False):
1478 lock = remote.lock()
1479
1480 base = {}
1481 heads = remote.heads()
1482 inc = self.findincoming(remote, base, heads)
1483 if not force and inc:
1484 self.ui.warn("abort: unsynced remote changes!\n")
1485 self.ui.status("(did you forget to sync? use push -f to force)\n")
1486 return 1
1487
1488 update = self.findoutgoing(remote, base)
1489 if not update:
1490 self.ui.status("no changes found\n")
1491 return 1
1492 elif not force:
1493 if len(heads) < len(self.changelog.heads()):
1494 self.ui.warn("abort: push creates new remote branches!\n")
1495 self.ui.status("(did you forget to merge?" +
1496 " use push -f to force)\n")
1497 return 1
1498
1499 cg = self.changegroup(update)
1500 return remote.addchangegroup(cg)
1501
    def changegroup(self, basenodes):
        """Build a changegroup stream for everything newer than *basenodes*.

        Returns a file-like object (read(l)) wrapping a generator that
        emits changelog, manifest, then per-file revision groups, each
        chunk framed by a 4-byte big-endian length; a zero-length chunk
        terminates the stream.
        """
        class genread:
            # adapt a string generator to a file-like read(l) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # each file section starts with its length-prefixed name
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # zero-length chunk marks end of stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1550
    def addchangegroup(self, source):
        """Consume a changegroup stream from *source* and add it to the repo.

        The stream layout mirrors changegroup(): changesets, manifests,
        then length-prefixed per-file groups.  Returns 1 when the
        changegroup hook vetoes, None otherwise.
        """

        def getchunk():
            # read one length-prefixed chunk; "" signals end of a group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1618
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to *node*, merging if necessary.

        allow   - permit an update that spans branches (a merge)
        force   - discard conflicting local changes
        choose  - optional predicate restricting which files to touch
        moddirstate - when False, leave the dirstate untouched
        Returns 1 on refusal (uncommitted merge, disallowed branch span).
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch from the target: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files only present in the target manifest
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote version of everything
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # parent directory missing: create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1852
1853 def merge3(self, fn, my, other):
1854 """perform a 3-way merge in the working directory"""
1855
1856 def temp(prefix, node):
1857 pre = "%s~%s." % (os.path.basename(fn), prefix)
1858 (fd, name) = tempfile.mkstemp("", pre)
1859 f = os.fdopen(fd, "wb")
1860 self.wwrite(fn, fl.read(node), f)
1861 f.close()
1862 return name
1863
1864 fl = self.file(fn)
1865 base = fl.ancestor(my, other)
1866 a = self.wjoin(fn)
1867 b = temp("base", base)
1868 c = temp("other", other)
1869
1870 self.ui.note("resolving %s\n" % fn)
1871 self.ui.debug("file %s: other %s ancestor %s\n" %
1872 (fn, short(other), short(base)))
1873
1874 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1875 or "hgmerge")
1876 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1877 if r:
1878 self.ui.warn("merging %s failed!\n" % fn)
1879
1880 os.unlink(b)
1881 os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
2018 class remoterepository:
2019 def local(self):
2020 return False
2021
2022 class httprepository(remoterepository):
2023 def __init__(self, ui, path):
2024 # fix missing / after hostname
2025 s = urlparse.urlsplit(path)
2026 partial = s[2]
2027 if not partial: partial = "/"
2028 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
2029 self.ui = ui
2030 no_list = [ "localhost", "127.0.0.1" ]
2031 host = ui.config("http_proxy", "host")
2032 if host is None:
2033 host = os.environ.get("http_proxy")
2034 if host and host.startswith('http://'):
2035 host = host[7:]
2036 user = ui.config("http_proxy", "user")
2037 passwd = ui.config("http_proxy", "passwd")
2038 no = ui.config("http_proxy", "no")
2039 if no is None:
2040 no = os.environ.get("no_proxy")
2041 if no:
2042 no_list = no_list + no.split(",")
2043
2044 no_proxy = 0
2045 for h in no_list:
2046 if (path.startswith("http://" + h + "/") or
2047 path.startswith("http://" + h + ":") or
2048 path == "http://" + h):
2049 no_proxy = 1
2050
2051 # Note: urllib2 takes proxy values from the environment and those will
2052 # take precedence
2053 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
2054 try:
2055 if os.environ.has_key(env):
2056 del os.environ[env]
2057 except OSError:
2058 pass
2059
2060 proxy_handler = urllib2.BaseHandler()
2061 if host and not no_proxy:
2062 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
2063
2064 authinfo = None
2065 if user and passwd:
2066 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
2067 passmgr.add_password(None, host, user, passwd)
2068 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
2069
2070 opener = urllib2.build_opener(proxy_handler, authinfo)
2071 urllib2.install_opener(opener)
2072
2073 def dev(self):
2074 return -1
2075
2076 def do_cmd(self, cmd, **args):
2077 self.ui.debug("sending %s command\n" % cmd)
2078 q = {"cmd": cmd}
2079 q.update(args)
2080 qs = urllib.urlencode(q)
2081 cu = "%s?%s" % (self.url, qs)
2082 resp = urllib2.urlopen(cu)
2083 proto = resp.headers['content-type']
2084
2085 # accept old "text/plain" and "application/hg-changegroup" for now
2086 if not proto.startswith('application/mercurial') and \
2087 not proto.startswith('text/plain') and \
2088 not proto.startswith('application/hg-changegroup'):
2089 raise RepoError("'%s' does not appear to be an hg repository"
2090 % self.url)
2091
2092 if proto.startswith('application/mercurial'):
2093 version = proto[22:]
2094 if float(version) > 0.1:
2095 raise RepoError("'%s' uses newer protocol %s" %
2096 (self.url, version))
2097
2098 return resp
2099
2100 def heads(self):
2101 d = self.do_cmd("heads").read()
2102 try:
2103 return map(bin, d[:-1].split(" "))
2104 except:
2105 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2106 raise
2107
2108 def branches(self, nodes):
2109 n = " ".join(map(hex, nodes))
2110 d = self.do_cmd("branches", nodes=n).read()
2111 try:
2112 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2113 return br
2114 except:
2115 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2116 raise
2117
2118 def between(self, pairs):
2119 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2120 d = self.do_cmd("between", pairs=n).read()
2121 try:
2122 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2123 return p
2124 except:
2125 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2126 raise
2127
2128 def changegroup(self, nodes):
2129 n = " ".join(map(hex, nodes))
2130 f = self.do_cmd("changegroup", roots=n)
2131 bytes = 0
2132
2133 class zread:
2134 def __init__(self, f):
2135 self.zd = zlib.decompressobj()
2136 self.f = f
2137 self.buf = ""
2138 def read(self, l):
2139 while l > len(self.buf):
2140 r = self.f.read(4096)
2141 if r:
2142 self.buf += self.zd.decompress(r)
2143 else:
2144 self.buf += self.zd.flush()
2145 break
2146 d, self.buf = self.buf[:l], self.buf[l:]
2147 return d
2148
2149 return zread(f)
2150
2151 class remotelock:
2152 def __init__(self, repo):
2153 self.repo = repo
2154 def release(self):
2155 self.repo.unlock()
2156 self.repo = None
2157 def __del__(self):
2158 if self.repo:
2159 self.release()
2160
2161 class sshrepository(remoterepository):
2162 def __init__(self, ui, path):
2163 self.url = path
2164 self.ui = ui
2165
2166 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
2167 if not m:
2168 raise RepoError("couldn't parse destination %s" % path)
2169
2170 self.user = m.group(2)
2171 self.host = m.group(3)
2172 self.port = m.group(5)
2173 self.path = m.group(7) or "."
2174
2175 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2176 args = self.port and ("%s -p %s") % (args, self.port) or args
2177
2178 sshcmd = self.ui.config("ui", "ssh", "ssh")
2179 remotecmd = self.ui.config("ui", "remotecmd", "hg")
2180 cmd = "%s %s '%s -R %s serve --stdio'"
2181 cmd = cmd % (sshcmd, args, remotecmd, self.path)
2182
2183 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
2184
2185 def readerr(self):
2186 while 1:
2187 r,w,x = select.select([self.pipee], [], [], 0)
2188 if not r: break
2189 l = self.pipee.readline()
2190 if not l: break
2191 self.ui.status("remote: ", l)
2192
2193 def __del__(self):
2194 try:
2195 self.pipeo.close()
2196 self.pipei.close()
2197 for l in self.pipee:
2198 self.ui.status("remote: ", l)
2199 self.pipee.close()
2200 except:
2201 pass
2202
2203 def dev(self):
2204 return -1
2205
2206 def do_cmd(self, cmd, **args):
2207 self.ui.debug("sending %s command\n" % cmd)
2208 self.pipeo.write("%s\n" % cmd)
2209 for k, v in args.items():
2210 self.pipeo.write("%s %d\n" % (k, len(v)))
2211 self.pipeo.write(v)
2212 self.pipeo.flush()
2213
2214 return self.pipei
2215
2216 def call(self, cmd, **args):
2217 r = self.do_cmd(cmd, **args)
2218 l = r.readline()
2219 self.readerr()
2220 try:
2221 l = int(l)
2222 except:
2223 raise RepoError("unexpected response '%s'" % l)
2224 return r.read(l)
2225
2226 def lock(self):
2227 self.call("lock")
2228 return remotelock(self)
2229
2230 def unlock(self):
2231 self.call("unlock")
2232
2233 def heads(self):
2234 d = self.call("heads")
2235 try:
2236 return map(bin, d[:-1].split(" "))
2237 except:
2238 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2239
2240 def branches(self, nodes):
2241 n = " ".join(map(hex, nodes))
2242 d = self.call("branches", nodes=n)
2243 try:
2244 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2245 return br
2246 except:
2247 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2248
2249 def between(self, pairs):
2250 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2251 d = self.call("between", pairs=n)
2252 try:
2253 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2254 return p
2255 except:
2256 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2257
2258 def changegroup(self, nodes):
2259 n = " ".join(map(hex, nodes))
2260 f = self.do_cmd("changegroup", roots=n)
2261 return self.pipei
2262
2263 def addchangegroup(self, cg):
2264 d = self.call("addchangegroup")
2265 if d:
2266 raise RepoError("push refused: %s", d)
2267
2268 while 1:
2269 d = cg.read(4096)
2270 if not d: break
2271 self.pipeo.write(d)
2272 self.readerr()
2273
2274 self.pipeo.flush()
2275
2276 self.readerr()
2277 l = int(self.pipei.readline())
2278 return self.pipei.read(l) != ""
2279
2280 class httpsrepository(httprepository):
2281 pass
2282
2283 def repository(ui, path=None, create=0):
44 def repository(ui, path=None, create=0):
2284 if path:
45 if path:
2285 if path.startswith("http://"):
46 if path.startswith("http://"):
2286 return httprepository(ui, path)
47 return httprepo.httprepository(ui, path)
2287 if path.startswith("https://"):
48 if path.startswith("https://"):
2288 return httpsrepository(ui, path)
49 return httprepo.httpsrepository(ui, path)
2289 if path.startswith("hg://"):
50 if path.startswith("hg://"):
2290 return httprepository(ui, path.replace("hg://", "http://"))
51 return httprepo.httprepository(
52 ui, path.replace("hg://", "http://"))
2291 if path.startswith("old-http://"):
53 if path.startswith("old-http://"):
2292 return localrepository(ui, path.replace("old-http://", "http://"))
54 return localrepo.localrepository(
55 ui, opener, path.replace("old-http://", "http://"))
2293 if path.startswith("ssh://"):
56 if path.startswith("ssh://"):
2294 return sshrepository(ui, path)
57 return sshrepo.sshrepository(ui, path)
2295
58
2296 return localrepository(ui, path, create)
59 return localrepo.localrepository(ui, opener, path, create)
This diff has been collapsed as it changes many lines, (2162 lines changed) Show them Hide them
@@ -1,2023 +1,13 b''
1 # hg.py - repository classes for mercurial
1 # httprepo.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import urllib, urllib2, urlparse, os, zlib
9 import util
9 from node import *
10 from revlog import *
10 from remoterepo import *
11 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
16 class filelog(revlog):
17 def __init__(self, opener, path):
18 revlog.__init__(self, opener,
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
25 return (path
26 .replace(".hg/", ".hg.hg/")
27 .replace(".i/", ".i.hg/")
28 .replace(".d/", ".d.hg/"))
29
30 def decodedir(self, path):
31 return (path
32 .replace(".d.hg/", ".d/")
33 .replace(".i.hg/", ".i/")
34 .replace(".hg.hg/", ".hg/"))
35
36 def read(self, node):
37 t = self.revision(node)
38 if not t.startswith('\1\n'):
39 return t
40 s = t.find('\1\n', 2)
41 return t[s+2:]
42
43 def readmeta(self, node):
44 t = self.revision(node)
45 if not t.startswith('\1\n'):
46 return t
47 s = t.find('\1\n', 2)
48 mt = t[2:s]
49 for l in mt.splitlines():
50 k, v = l.split(": ", 1)
51 m[k] = v
52 return m
53
54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 if meta or text.startswith('\1\n'):
56 mt = ""
57 if meta:
58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 return self.addrevision(text, transaction, link, p1, p2)
61
62 def annotate(self, node):
63
64 def decorate(text, rev):
65 return ([rev] * len(text.splitlines()), text)
66
67 def pair(parent, child):
68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 child[0][b1:b2] = parent[0][a1:a2]
70 return child
71
72 # find all ancestors
73 needed = {node:1}
74 visit = [node]
75 while visit:
76 n = visit.pop(0)
77 for p in self.parents(n):
78 if p not in needed:
79 needed[p] = 1
80 visit.append(p)
81 else:
82 # count how many times we'll use this
83 needed[p] += 1
84
85 # sort by revision which is a topological order
86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 visit.sort()
88 hist = {}
89
90 for r,n in visit:
91 curr = decorate(self.read(n), self.linkrev(n))
92 for p in self.parents(n):
93 if p != nullid:
94 curr = pair(hist[p], curr)
95 # trim the history of unneeded revs
96 needed[p] -= 1
97 if not needed[p]:
98 del hist[p]
99 hist[n] = curr
100
101 return zip(hist[n][0], hist[n][1].splitlines(1))
102
103 class manifest(revlog):
104 def __init__(self, opener):
105 self.mapcache = None
106 self.listcache = None
107 self.addlist = None
108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109
110 def read(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if self.mapcache and self.mapcache[0] == node:
113 return self.mapcache[1]
114 text = self.revision(node)
115 map = {}
116 flag = {}
117 self.listcache = (text, text.splitlines(1))
118 for l in self.listcache[1]:
119 (f, n) = l.split('\0')
120 map[f] = bin(n[:40])
121 flag[f] = (n[40:-1] == "x")
122 self.mapcache = (node, map, flag)
123 return map
124
125 def readflags(self, node):
126 if node == nullid: return {} # don't upset local cache
127 if not self.mapcache or self.mapcache[0] != node:
128 self.read(node)
129 return self.mapcache[2]
130
131 def diff(self, a, b):
132 # this is sneaky, as we're not actually using a and b
133 if self.listcache and self.addlist and self.listcache[0] == a:
134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 if mdiff.patch(a, d) != b:
136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 return mdiff.textdiff(a, b)
138 return d
139 else:
140 return mdiff.textdiff(a, b)
141
142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 changed=None):
144 # directly generate the mdiff delta from the data collected during
145 # the bisect loop below
146 def gendelta(delta):
147 i = 0
148 result = []
149 while i < len(delta):
150 start = delta[i][2]
151 end = delta[i][3]
152 l = delta[i][4]
153 if l == None:
154 l = ""
155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 and end >= delta[i+1][2]:
157 if delta[i+1][3] > end:
158 end = delta[i+1][3]
159 if delta[i+1][4]:
160 l += delta[i+1][4]
161 i += 1
162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 i += 1
164 return result
165
166 # apply the changes collected during the bisect loop to our addlist
167 def addlistdelta(addlist, delta):
168 # apply the deltas to the addlist. start from the bottom up
169 # so changes to the offsets don't mess things up.
170 i = len(delta)
171 while i > 0:
172 i -= 1
173 start = delta[i][0]
174 end = delta[i][1]
175 if delta[i][4]:
176 addlist[start:end] = [delta[i][4]]
177 else:
178 del addlist[start:end]
179 return addlist
180
181 # calculate the byte offset of the start of each line in the
182 # manifest
183 def calcoffsets(addlist):
184 offsets = [0] * (len(addlist) + 1)
185 offset = 0
186 i = 0
187 while i < len(addlist):
188 offsets[i] = offset
189 offset += len(addlist[i])
190 i += 1
191 offsets[i] = offset
192 return offsets
193
194 # if we're using the listcache, make sure it is valid and
195 # parented by the same node we're diffing against
196 if not changed or not self.listcache or not p1 or \
197 self.mapcache[0] != p1:
198 files = map.keys()
199 files.sort()
200
201 self.addlist = ["%s\000%s%s\n" %
202 (f, hex(map[f]), flags[f] and "x" or '')
203 for f in files]
204 cachedelta = None
205 else:
206 addlist = self.listcache[1]
207
208 # find the starting offset for each line in the add list
209 offsets = calcoffsets(addlist)
210
211 # combine the changed lists into one list for sorting
212 work = [[x, 0] for x in changed[0]]
213 work[len(work):] = [[x, 1] for x in changed[1]]
214 work.sort()
215
216 delta = []
217 bs = 0
218
219 for w in work:
220 f = w[0]
221 # bs will either be the index of the item or the insert point
222 bs = bisect.bisect(addlist, f, bs)
223 if bs < len(addlist):
224 fn = addlist[bs][:addlist[bs].index('\0')]
225 else:
226 fn = None
227 if w[1] == 0:
228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 flags[f] and "x" or '')
230 else:
231 l = None
232 start = bs
233 if fn != f:
234 # item not found, insert a new one
235 end = bs
236 if w[1] == 1:
237 sys.stderr.write("failed to remove %s from manifest\n"
238 % f)
239 sys.exit(1)
240 else:
241 # item is found, replace/delete the existing line
242 end = bs + 1
243 delta.append([start, end, offsets[start], offsets[end], l])
244
245 self.addlist = addlistdelta(addlist, delta)
246 if self.mapcache[0] == self.tip():
247 cachedelta = "".join(gendelta(delta))
248 else:
249 cachedelta = None
250
251 text = "".join(self.addlist)
252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 sys.stderr.write("manifest delta failure\n")
254 sys.exit(1)
255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 self.mapcache = (n, map, flags)
257 self.listcache = (text, self.addlist)
258 self.addlist = None
259
260 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
593 # used to avoid circular references so destructors work
594 def opener(base):
595 p = base
596 def o(path, mode="r"):
597 if p.startswith("http://"):
598 f = os.path.join(p, urllib.quote(path))
599 return httprangereader.httprangereader(f)
600
601 f = os.path.join(p, path)
602
603 mode += "b" # for that other OS
604
605 if mode[0] != "r":
606 try:
607 s = os.stat(f)
608 except OSError:
609 d = os.path.dirname(f)
610 if not os.path.isdir(d):
611 os.makedirs(d)
612 else:
613 if s.st_nlink > 1:
614 file(f + ".tmp", "wb").write(file(f, "rb").read())
615 util.rename(f+".tmp", f)
616
617 return file(f, mode)
618
619 return o
620
621 class RepoError(Exception): pass
622
623 class localrepository:
624 def __init__(self, ui, path=None, create=0):
625 self.remote = 0
626 if path and path.startswith("http://"):
627 self.remote = 1
628 self.path = path
629 else:
630 if not path:
631 p = os.getcwd()
632 while not os.path.isdir(os.path.join(p, ".hg")):
633 oldp = p
634 p = os.path.dirname(p)
635 if p == oldp: raise RepoError("no repo found")
636 path = p
637 self.path = os.path.join(path, ".hg")
638
639 if not create and not os.path.isdir(self.path):
640 raise RepoError("repository %s not found" % self.path)
641
642 self.root = os.path.abspath(path)
643 self.ui = ui
644
645 if create:
646 os.mkdir(self.path)
647 os.mkdir(self.join("data"))
648
649 self.opener = opener(self.path)
650 self.wopener = opener(self.root)
651 self.manifest = manifest(self.opener)
652 self.changelog = changelog(self.opener)
653 self.tagscache = None
654 self.nodetagscache = None
655
656 if not self.remote:
657 self.dirstate = dirstate(self.opener, ui, self.root)
658 try:
659 self.ui.readconfig(self.opener("hgrc"))
660 except IOError: pass
661
    def hook(self, name, **args):
        """Run the configured [hooks] command *name*, if any.

        Keyword arguments are exported to the hook as upper-cased
        environment variables and restored afterwards.  Returns False
        when the hook exits non-zero, True otherwise (including when
        no hook is configured).
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            r = os.system(s)

            # restore the previous environment exactly
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
685
    def tags(self):
        '''return a mapping of tag to binary node, cached after first use'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # tolerate malformed (non-hex) node strings in tag files
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags data in this repository
                pass

            try:
                # localtags entries are applied last, so they override
                # committed tags of the same name
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # the tip is always implicitly tagged
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
753 def dev(self):
754 if self.remote: return -1
755 return os.stat(self.path).st_dev
756
757 def local(self):
758 return not self.remote
759
760 def join(self, f):
761 return os.path.join(self.path, f)
762
763 def wjoin(self, f):
764 return os.path.join(self.root, f)
765
766 def file(self, f):
767 if f[0] == '/': f = f[1:]
768 return filelog(self.opener, f)
769
770 def getcwd(self):
771 return self.dirstate.getcwd()
772
773 def wfile(self, f, mode='r'):
774 return self.wopener(f, mode)
775
776 def wread(self, filename):
777 return self.wopener(filename, 'r').read()
778
779 def wwrite(self, filename, data, fd=None):
780 if fd:
781 return fd.write(data)
782 return self.wopener(filename, 'w').write(data)
783
    def transaction(self):
        """start a journaled transaction on the store.

        The current dirstate is saved alongside the journal so undo()
        can restore it; when the transaction closes, the journal files
        are renamed to "undo"/"undo.dirstate" for later rollback.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the completed journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
799
    def recover(self):
        """roll back an interrupted transaction, if a journal exists"""
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")
807
    def undo(self):
        """roll back the last completed transaction.

        Restores both the store (via the "undo" journal) and the
        dirstate that was saved when the transaction started.
        """
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the old dirstate before replacing its backing file
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
818
    def lock(self, wait=1):
        """acquire the repository lock file.

        First tries without waiting; with *wait* (the default), warns
        and retries blocking when the lock is held.  Without *wait*,
        lock.LockHeld propagates to the caller.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
827
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """commit *files* directly, bypassing normal dirstate checks.

        Parents default to the dirstate parents.  The dirstate is only
        updated when p1 is still the dirstate's first parent.  Files
        that cannot be read are dropped from the new manifest.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable file: remove it from the new manifest
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
897
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """commit changes to the repository and return the new node.

        With no *files*, commit everything changes() reports as changed,
        added or removed; otherwise commit exactly the listed files.
        Returns None when nothing changed, the user abandoned the commit
        message, or a precommit/commit hook failed.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an empty merge commit is still allowed (p2 != nullid)
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            # record copy information from the dirstate as revlog metadata
            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build the commit-editor template and run the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """return (changed, added, deleted, unknown) sorted file lists.

        With no nodes, compare the working directory with its first
        parent; with node1 only, compare the working directory with
        that revision; with both, compare the two revisions.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir content of fn with its stored revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of *node*, restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" marks a working-dir pseudo-entry: compare content
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever remains in mf1 was deleted relative to node2
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
1143 def heads(self):
1144 return self.changelog.heads()
1145
1146 # branchlookup returns a dict giving a list of branches for
1147 # each head. A branch is defined as the tag of a node or
1148 # the branch of the node's parents. If a node has multiple
1149 # branch tags, tags are eliminated if they are visible from other
1150 # branch tags.
1151 #
1152 # So, for this graph: a->b->c->d->e
1153 # \ /
1154 # aa -----/
1155 # a has tag 2.6.12
1156 # d has tag 2.6.13
1157 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1158 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1159 # from the list.
1160 #
1161 # It is possible that more than one head will have the same branch tag.
1162 # callers need to check the result for multiple heads under the same
1163 # branch tag if that is a problem for them (ie checkout of a specific
1164 # branch).
1165 #
1166 # passing in a specific branch will limit the depth of the search
1167 # through the parents. It won't limit the branches returned in the
1168 # result though.
    def branchlookup(self, heads=None, branch=None):
        """return a dict mapping each head to its visible branch tags.

        See the comment block above for the full semantics; *branch*
        limits how deep the parent traversal descends.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # tag node/head -> {visible tag node: 1}
        merges = []         # pending (second parent, found-so-far) pairs
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue merge second-parents for their own traversal
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from *node*
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
1284 def newer(self, nodes):
1285 m = {}
1286 nl = []
1287 pm = {}
1288 cl = self.changelog
1289 t = l = cl.count()
1290
1291 # find the lowest numbered node
1292 for n in nodes:
1293 l = min(l, cl.rev(n))
1294 m[n] = 1
1295
1296 for i in xrange(l, t):
1297 n = cl.node(i)
1298 if n in m: # explicitly listed
1299 pm[n] = 1
1300 nl.append(n)
1301 continue
1302 for p in cl.parents(n):
1303 if p in pm: # parent listed
1304 pm[n] = 1
1305 nl.append(n)
1306 break
1307
1308 return nl
1309
1310 def findincoming(self, remote, base=None, heads=None):
1311 m = self.changelog.nodemap
1312 search = []
1313 fetch = {}
1314 seen = {}
1315 seenbranch = {}
1316 if base == None:
1317 base = {}
1318
1319 # assume we're closer to the tip than the root
1320 # and start by examining the heads
1321 self.ui.status("searching for changes\n")
1322
1323 if not heads:
1324 heads = remote.heads()
1325
1326 unknown = []
1327 for h in heads:
1328 if h not in m:
1329 unknown.append(h)
1330 else:
1331 base[h] = 1
1332
1333 if not unknown:
1334 return None
1335
1336 rep = {}
1337 reqcnt = 0
1338
1339 # search through remote branches
1340 # a 'branch' here is a linear segment of history, with four parts:
1341 # head, root, first parent, second parent
1342 # (a branch always has two parents (or none) by definition)
1343 unknown = remote.branches(unknown)
1344 while unknown:
1345 r = []
1346 while unknown:
1347 n = unknown.pop(0)
1348 if n[0] in seen:
1349 continue
1350
1351 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1352 if n[0] == nullid:
1353 break
1354 if n in seenbranch:
1355 self.ui.debug("branch already found\n")
1356 continue
1357 if n[1] and n[1] in m: # do we know the base?
1358 self.ui.debug("found incomplete branch %s:%s\n"
1359 % (short(n[0]), short(n[1])))
1360 search.append(n) # schedule branch range for scanning
1361 seenbranch[n] = 1
1362 else:
1363 if n[1] not in seen and n[1] not in fetch:
1364 if n[2] in m and n[3] in m:
1365 self.ui.debug("found new changeset %s\n" %
1366 short(n[1]))
1367 fetch[n[1]] = 1 # earliest unknown
1368 base[n[2]] = 1 # latest known
1369 continue
1370
1371 for a in n[2:4]:
1372 if a not in rep:
1373 r.append(a)
1374 rep[a] = 1
1375
1376 seen[n[0]] = 1
1377
1378 if r:
1379 reqcnt += 1
1380 self.ui.debug("request %d: %s\n" %
1381 (reqcnt, " ".join(map(short, r))))
1382 for p in range(0, len(r), 10):
1383 for b in remote.branches(r[p:p+10]):
1384 self.ui.debug("received %s:%s\n" %
1385 (short(b[0]), short(b[1])))
1386 if b[0] in m:
1387 self.ui.debug("found base node %s\n" % short(b[0]))
1388 base[b[0]] = 1
1389 elif b[0] not in seen:
1390 unknown.append(b)
1391
1392 # do binary search on the branches we found
1393 while search:
1394 n = search.pop(0)
1395 reqcnt += 1
1396 l = remote.between([(n[0], n[1])])[0]
1397 l.append(n[1])
1398 p = n[0]
1399 f = 1
1400 for i in l:
1401 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1402 if i in m:
1403 if f <= 2:
1404 self.ui.debug("found new branch changeset %s\n" %
1405 short(p))
1406 fetch[p] = 1
1407 base[i] = 1
1408 else:
1409 self.ui.debug("narrowed branch search to %s:%s\n"
1410 % (short(p), short(i)))
1411 search.append((p, i))
1412 break
1413 p, f = i, f * 2
1414
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1417 if f in m:
1418 raise RepoError("already have changeset " + short(f[:4]))
1419
1420 if base.keys() == [nullid]:
1421 self.ui.warn("warning: pulling from an unrelated repository!\n")
1422
1423 self.ui.note("found new changesets starting at " +
1424 " ".join([short(f) for f in fetch]) + "\n")
1425
1426 self.ui.debug("%d total queries\n" % reqcnt)
1427
1428 return fetch.keys()
1429
    def findoutgoing(self, remote, base=None, heads=None):
        """return the root nodes of changesets missing from *remote*.

        When *base* is not supplied, findincoming() is used to compute
        the set of nodes common to both repositories.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
1459
    def pull(self, remote):
        """pull missing changesets from *remote* into this repository.

        Returns 1 when there is nothing to fetch, otherwise the result
        of addchangegroup().
        """
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status("requesting all changes\n")
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status("no changes found\n")
            return 1

        cg = remote.changegroup(fetch)
        return self.addchangegroup(cg)
1476
    def push(self, remote, force=False):
        """push outgoing changesets to *remote*.

        Unless *force* is set, refuses when the remote has unsynced
        changes or when the push would create new remote heads.
        Returns 1 when nothing was pushed.
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn("abort: unsynced remote changes!\n")
            self.ui.status("(did you forget to sync? use push -f to force)\n")
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status("no changes found\n")
            return 1
        elif not force:
            # more local heads than remote heads => push would add heads
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn("abort: push creates new remote branches!\n")
                self.ui.status("(did you forget to merge?" +
                               " use push -f to force)\n")
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
1501
    def changegroup(self, basenodes):
        """return a read()-able stream of changegroup data for all
        changesets descending from *basenodes*.

        The stream carries the changelog group, the manifest group,
        then for each changed file a length-prefixed filename followed
        by its revision group, terminated by a zero-length chunk.
        """
        class genread:
            # adapt a generator to a file-like object exposing read(l)
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # zero-length chunk marks the end of the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1550
    def addchangegroup(self, source):
        """add a changegroup read from *source* to the repository.

        Consumes the changelog, manifest and per-file revision groups
        in the format produced by changegroup(), inside one store
        transaction.  Returns 1 when the changegroup hook fails.
        """

        def getchunk():
            # read one length-prefixed chunk; "" signals end of a group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1618
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """update the working directory to revision *node*.

        A linear update just jumps to the new revision; crossing a
        branch requires *allow* (merge) or *force* (discard/clobber).
        *choose* optionally filters which files are touched, and
        *moddirstate* controls whether the dirstate is rewritten.
        Returns 1 on abort.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge
        get = {}        # files to fetch from the target revision
        remove = []     # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # merge the exec bits the same way as contents
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # everything left in m2 only exists in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update turns merges into plain fetches
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # parent directory may not exist yet
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1852
1853 def merge3(self, fn, my, other):
1854 """perform a 3-way merge in the working directory"""
1855
1856 def temp(prefix, node):
1857 pre = "%s~%s." % (os.path.basename(fn), prefix)
1858 (fd, name) = tempfile.mkstemp("", pre)
1859 f = os.fdopen(fd, "wb")
1860 self.wwrite(fn, fl.read(node), f)
1861 f.close()
1862 return name
1863
1864 fl = self.file(fn)
1865 base = fl.ancestor(my, other)
1866 a = self.wjoin(fn)
1867 b = temp("base", base)
1868 c = temp("other", other)
1869
1870 self.ui.note("resolving %s\n" % fn)
1871 self.ui.debug("file %s: other %s ancestor %s\n" %
1872 (fn, short(other), short(base)))
1873
1874 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1875 or "hgmerge")
1876 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1877 if r:
1878 self.ui.warn("merging %s failed!\n" % fn)
1879
1880 os.unlink(b)
1881 os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
2018 class remoterepository:
2019 def local(self):
2020 return False
2021
11
2022 class httprepository(remoterepository):
12 class httprepository(remoterepository):
2023 def __init__(self, ui, path):
13 def __init__(self, ui, path):
@@ -2148,149 +138,5 b' class httprepository(remoterepository):'
2148
138
2149 return zread(f)
139 return zread(f)
2150
140
2151 class remotelock:
2152 def __init__(self, repo):
2153 self.repo = repo
2154 def release(self):
2155 self.repo.unlock()
2156 self.repo = None
2157 def __del__(self):
2158 if self.repo:
2159 self.release()
2160
2161 class sshrepository(remoterepository):
2162 def __init__(self, ui, path):
2163 self.url = path
2164 self.ui = ui
2165
2166 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
2167 if not m:
2168 raise RepoError("couldn't parse destination %s" % path)
2169
2170 self.user = m.group(2)
2171 self.host = m.group(3)
2172 self.port = m.group(5)
2173 self.path = m.group(7) or "."
2174
2175 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2176 args = self.port and ("%s -p %s") % (args, self.port) or args
2177
2178 sshcmd = self.ui.config("ui", "ssh", "ssh")
2179 remotecmd = self.ui.config("ui", "remotecmd", "hg")
2180 cmd = "%s %s '%s -R %s serve --stdio'"
2181 cmd = cmd % (sshcmd, args, remotecmd, self.path)
2182
2183 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
2184
2185 def readerr(self):
2186 while 1:
2187 r,w,x = select.select([self.pipee], [], [], 0)
2188 if not r: break
2189 l = self.pipee.readline()
2190 if not l: break
2191 self.ui.status("remote: ", l)
2192
2193 def __del__(self):
2194 try:
2195 self.pipeo.close()
2196 self.pipei.close()
2197 for l in self.pipee:
2198 self.ui.status("remote: ", l)
2199 self.pipee.close()
2200 except:
2201 pass
2202
2203 def dev(self):
2204 return -1
2205
2206 def do_cmd(self, cmd, **args):
2207 self.ui.debug("sending %s command\n" % cmd)
2208 self.pipeo.write("%s\n" % cmd)
2209 for k, v in args.items():
2210 self.pipeo.write("%s %d\n" % (k, len(v)))
2211 self.pipeo.write(v)
2212 self.pipeo.flush()
2213
2214 return self.pipei
2215
2216 def call(self, cmd, **args):
2217 r = self.do_cmd(cmd, **args)
2218 l = r.readline()
2219 self.readerr()
2220 try:
2221 l = int(l)
2222 except:
2223 raise RepoError("unexpected response '%s'" % l)
2224 return r.read(l)
2225
2226 def lock(self):
2227 self.call("lock")
2228 return remotelock(self)
2229
2230 def unlock(self):
2231 self.call("unlock")
2232
2233 def heads(self):
2234 d = self.call("heads")
2235 try:
2236 return map(bin, d[:-1].split(" "))
2237 except:
2238 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2239
2240 def branches(self, nodes):
2241 n = " ".join(map(hex, nodes))
2242 d = self.call("branches", nodes=n)
2243 try:
2244 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2245 return br
2246 except:
2247 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2248
2249 def between(self, pairs):
2250 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2251 d = self.call("between", pairs=n)
2252 try:
2253 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2254 return p
2255 except:
2256 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2257
2258 def changegroup(self, nodes):
2259 n = " ".join(map(hex, nodes))
2260 f = self.do_cmd("changegroup", roots=n)
2261 return self.pipei
2262
2263 def addchangegroup(self, cg):
2264 d = self.call("addchangegroup")
2265 if d:
2266 raise RepoError("push refused: %s", d)
2267
2268 while 1:
2269 d = cg.read(4096)
2270 if not d: break
2271 self.pipeo.write(d)
2272 self.readerr()
2273
2274 self.pipeo.flush()
2275
2276 self.readerr()
2277 l = int(self.pipei.readline())
2278 return self.pipei.read(l) != ""
2279
2280 class httpsrepository(httprepository):
141 class httpsrepository(httprepository):
2281 pass
142 pass
2282
2283 def repository(ui, path=None, create=0):
2284 if path:
2285 if path.startswith("http://"):
2286 return httprepository(ui, path)
2287 if path.startswith("https://"):
2288 return httpsrepository(ui, path)
2289 if path.startswith("hg://"):
2290 return httprepository(ui, path.replace("hg://", "http://"))
2291 if path.startswith("old-http://"):
2292 return localrepository(ui, path.replace("old-http://", "http://"))
2293 if path.startswith("ssh://"):
2294 return sshrepository(ui, path)
2295
2296 return localrepository(ui, path, create)
This diff has been collapsed as it changes many lines, (903 lines changed) Show them Hide them
@@ -1,627 +1,22 b''
1 # hg.py - repository classes for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import sys, struct, os, util
9 import util
9 from repo import *
10 from revlog import *
10 from revlog import *
11 from filelog import *
12 from manifest import *
13 from changelog import *
11 from demandload import *
14 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
15 from dirstate import *
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
16 demandload(globals(), "re lock transaction tempfile stat")
14 demandload(globals(), "bisect errno select stat")
15
16 class filelog(revlog):
17 def __init__(self, opener, path):
18 revlog.__init__(self, opener,
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
25 return (path
26 .replace(".hg/", ".hg.hg/")
27 .replace(".i/", ".i.hg/")
28 .replace(".d/", ".d.hg/"))
29
30 def decodedir(self, path):
31 return (path
32 .replace(".d.hg/", ".d/")
33 .replace(".i.hg/", ".i/")
34 .replace(".hg.hg/", ".hg/"))
35
36 def read(self, node):
37 t = self.revision(node)
38 if not t.startswith('\1\n'):
39 return t
40 s = t.find('\1\n', 2)
41 return t[s+2:]
42
43 def readmeta(self, node):
44 t = self.revision(node)
45 if not t.startswith('\1\n'):
46 return t
47 s = t.find('\1\n', 2)
48 mt = t[2:s]
49 for l in mt.splitlines():
50 k, v = l.split(": ", 1)
51 m[k] = v
52 return m
53
54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 if meta or text.startswith('\1\n'):
56 mt = ""
57 if meta:
58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 return self.addrevision(text, transaction, link, p1, p2)
61
62 def annotate(self, node):
63
64 def decorate(text, rev):
65 return ([rev] * len(text.splitlines()), text)
66
67 def pair(parent, child):
68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 child[0][b1:b2] = parent[0][a1:a2]
70 return child
71
72 # find all ancestors
73 needed = {node:1}
74 visit = [node]
75 while visit:
76 n = visit.pop(0)
77 for p in self.parents(n):
78 if p not in needed:
79 needed[p] = 1
80 visit.append(p)
81 else:
82 # count how many times we'll use this
83 needed[p] += 1
84
85 # sort by revision which is a topological order
86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 visit.sort()
88 hist = {}
89
90 for r,n in visit:
91 curr = decorate(self.read(n), self.linkrev(n))
92 for p in self.parents(n):
93 if p != nullid:
94 curr = pair(hist[p], curr)
95 # trim the history of unneeded revs
96 needed[p] -= 1
97 if not needed[p]:
98 del hist[p]
99 hist[n] = curr
100
101 return zip(hist[n][0], hist[n][1].splitlines(1))
102
103 class manifest(revlog):
104 def __init__(self, opener):
105 self.mapcache = None
106 self.listcache = None
107 self.addlist = None
108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109
110 def read(self, node):
111 if node == nullid: return {} # don't upset local cache
112 if self.mapcache and self.mapcache[0] == node:
113 return self.mapcache[1]
114 text = self.revision(node)
115 map = {}
116 flag = {}
117 self.listcache = (text, text.splitlines(1))
118 for l in self.listcache[1]:
119 (f, n) = l.split('\0')
120 map[f] = bin(n[:40])
121 flag[f] = (n[40:-1] == "x")
122 self.mapcache = (node, map, flag)
123 return map
124
125 def readflags(self, node):
126 if node == nullid: return {} # don't upset local cache
127 if not self.mapcache or self.mapcache[0] != node:
128 self.read(node)
129 return self.mapcache[2]
130
131 def diff(self, a, b):
132 # this is sneaky, as we're not actually using a and b
133 if self.listcache and self.addlist and self.listcache[0] == a:
134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 if mdiff.patch(a, d) != b:
136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 return mdiff.textdiff(a, b)
138 return d
139 else:
140 return mdiff.textdiff(a, b)
141
142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 changed=None):
144 # directly generate the mdiff delta from the data collected during
145 # the bisect loop below
146 def gendelta(delta):
147 i = 0
148 result = []
149 while i < len(delta):
150 start = delta[i][2]
151 end = delta[i][3]
152 l = delta[i][4]
153 if l == None:
154 l = ""
155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 and end >= delta[i+1][2]:
157 if delta[i+1][3] > end:
158 end = delta[i+1][3]
159 if delta[i+1][4]:
160 l += delta[i+1][4]
161 i += 1
162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 i += 1
164 return result
165
166 # apply the changes collected during the bisect loop to our addlist
167 def addlistdelta(addlist, delta):
168 # apply the deltas to the addlist. start from the bottom up
169 # so changes to the offsets don't mess things up.
170 i = len(delta)
171 while i > 0:
172 i -= 1
173 start = delta[i][0]
174 end = delta[i][1]
175 if delta[i][4]:
176 addlist[start:end] = [delta[i][4]]
177 else:
178 del addlist[start:end]
179 return addlist
180
181 # calculate the byte offset of the start of each line in the
182 # manifest
183 def calcoffsets(addlist):
184 offsets = [0] * (len(addlist) + 1)
185 offset = 0
186 i = 0
187 while i < len(addlist):
188 offsets[i] = offset
189 offset += len(addlist[i])
190 i += 1
191 offsets[i] = offset
192 return offsets
193
194 # if we're using the listcache, make sure it is valid and
195 # parented by the same node we're diffing against
196 if not changed or not self.listcache or not p1 or \
197 self.mapcache[0] != p1:
198 files = map.keys()
199 files.sort()
200
201 self.addlist = ["%s\000%s%s\n" %
202 (f, hex(map[f]), flags[f] and "x" or '')
203 for f in files]
204 cachedelta = None
205 else:
206 addlist = self.listcache[1]
207
208 # find the starting offset for each line in the add list
209 offsets = calcoffsets(addlist)
210
211 # combine the changed lists into one list for sorting
212 work = [[x, 0] for x in changed[0]]
213 work[len(work):] = [[x, 1] for x in changed[1]]
214 work.sort()
215
216 delta = []
217 bs = 0
218
219 for w in work:
220 f = w[0]
221 # bs will either be the index of the item or the insert point
222 bs = bisect.bisect(addlist, f, bs)
223 if bs < len(addlist):
224 fn = addlist[bs][:addlist[bs].index('\0')]
225 else:
226 fn = None
227 if w[1] == 0:
228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 flags[f] and "x" or '')
230 else:
231 l = None
232 start = bs
233 if fn != f:
234 # item not found, insert a new one
235 end = bs
236 if w[1] == 1:
237 sys.stderr.write("failed to remove %s from manifest\n"
238 % f)
239 sys.exit(1)
240 else:
241 # item is found, replace/delete the existing line
242 end = bs + 1
243 delta.append([start, end, offsets[start], offsets[end], l])
244
245 self.addlist = addlistdelta(addlist, delta)
246 if self.mapcache[0] == self.tip():
247 cachedelta = "".join(gendelta(delta))
248 else:
249 cachedelta = None
250
251 text = "".join(self.addlist)
252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 sys.stderr.write("manifest delta failure\n")
254 sys.exit(1)
255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 self.mapcache = (n, map, flags)
257 self.listcache = (text, self.addlist)
258 self.addlist = None
259
260 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
593 # used to avoid circular references so destructors work
594 def opener(base):
595 p = base
596 def o(path, mode="r"):
597 if p.startswith("http://"):
598 f = os.path.join(p, urllib.quote(path))
599 return httprangereader.httprangereader(f)
600
601 f = os.path.join(p, path)
602
603 mode += "b" # for that other OS
604
605 if mode[0] != "r":
606 try:
607 s = os.stat(f)
608 except OSError:
609 d = os.path.dirname(f)
610 if not os.path.isdir(d):
611 os.makedirs(d)
612 else:
613 if s.st_nlink > 1:
614 file(f + ".tmp", "wb").write(file(f, "rb").read())
615 util.rename(f+".tmp", f)
616
617 return file(f, mode)
618
619 return o
620
621 class RepoError(Exception): pass
622
17
623 class localrepository:
18 class localrepository:
624 def __init__(self, ui, path=None, create=0):
19 def __init__(self, ui, opener, path=None, create=0):
625 self.remote = 0
20 self.remote = 0
626 if path and path.startswith("http://"):
21 if path and path.startswith("http://"):
627 self.remote = 1
22 self.remote = 1
@@ -2014,283 +1409,3 b' class localrepository:'
2014 if errors:
1409 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
1410 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
1411 return 1
2017
2018 class remoterepository:
2019 def local(self):
2020 return False
2021
2022 class httprepository(remoterepository):
2023 def __init__(self, ui, path):
2024 # fix missing / after hostname
2025 s = urlparse.urlsplit(path)
2026 partial = s[2]
2027 if not partial: partial = "/"
2028 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
2029 self.ui = ui
2030 no_list = [ "localhost", "127.0.0.1" ]
2031 host = ui.config("http_proxy", "host")
2032 if host is None:
2033 host = os.environ.get("http_proxy")
2034 if host and host.startswith('http://'):
2035 host = host[7:]
2036 user = ui.config("http_proxy", "user")
2037 passwd = ui.config("http_proxy", "passwd")
2038 no = ui.config("http_proxy", "no")
2039 if no is None:
2040 no = os.environ.get("no_proxy")
2041 if no:
2042 no_list = no_list + no.split(",")
2043
2044 no_proxy = 0
2045 for h in no_list:
2046 if (path.startswith("http://" + h + "/") or
2047 path.startswith("http://" + h + ":") or
2048 path == "http://" + h):
2049 no_proxy = 1
2050
2051 # Note: urllib2 takes proxy values from the environment and those will
2052 # take precedence
2053 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
2054 try:
2055 if os.environ.has_key(env):
2056 del os.environ[env]
2057 except OSError:
2058 pass
2059
2060 proxy_handler = urllib2.BaseHandler()
2061 if host and not no_proxy:
2062 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
2063
2064 authinfo = None
2065 if user and passwd:
2066 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
2067 passmgr.add_password(None, host, user, passwd)
2068 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
2069
2070 opener = urllib2.build_opener(proxy_handler, authinfo)
2071 urllib2.install_opener(opener)
2072
2073 def dev(self):
2074 return -1
2075
2076 def do_cmd(self, cmd, **args):
2077 self.ui.debug("sending %s command\n" % cmd)
2078 q = {"cmd": cmd}
2079 q.update(args)
2080 qs = urllib.urlencode(q)
2081 cu = "%s?%s" % (self.url, qs)
2082 resp = urllib2.urlopen(cu)
2083 proto = resp.headers['content-type']
2084
2085 # accept old "text/plain" and "application/hg-changegroup" for now
2086 if not proto.startswith('application/mercurial') and \
2087 not proto.startswith('text/plain') and \
2088 not proto.startswith('application/hg-changegroup'):
2089 raise RepoError("'%s' does not appear to be an hg repository"
2090 % self.url)
2091
2092 if proto.startswith('application/mercurial'):
2093 version = proto[22:]
2094 if float(version) > 0.1:
2095 raise RepoError("'%s' uses newer protocol %s" %
2096 (self.url, version))
2097
2098 return resp
2099
2100 def heads(self):
2101 d = self.do_cmd("heads").read()
2102 try:
2103 return map(bin, d[:-1].split(" "))
2104 except:
2105 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2106 raise
2107
2108 def branches(self, nodes):
2109 n = " ".join(map(hex, nodes))
2110 d = self.do_cmd("branches", nodes=n).read()
2111 try:
2112 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2113 return br
2114 except:
2115 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2116 raise
2117
2118 def between(self, pairs):
2119 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2120 d = self.do_cmd("between", pairs=n).read()
2121 try:
2122 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2123 return p
2124 except:
2125 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2126 raise
2127
2128 def changegroup(self, nodes):
2129 n = " ".join(map(hex, nodes))
2130 f = self.do_cmd("changegroup", roots=n)
2131 bytes = 0
2132
2133 class zread:
2134 def __init__(self, f):
2135 self.zd = zlib.decompressobj()
2136 self.f = f
2137 self.buf = ""
2138 def read(self, l):
2139 while l > len(self.buf):
2140 r = self.f.read(4096)
2141 if r:
2142 self.buf += self.zd.decompress(r)
2143 else:
2144 self.buf += self.zd.flush()
2145 break
2146 d, self.buf = self.buf[:l], self.buf[l:]
2147 return d
2148
2149 return zread(f)
2150
2151 class remotelock:
2152 def __init__(self, repo):
2153 self.repo = repo
2154 def release(self):
2155 self.repo.unlock()
2156 self.repo = None
2157 def __del__(self):
2158 if self.repo:
2159 self.release()
2160
2161 class sshrepository(remoterepository):
2162 def __init__(self, ui, path):
2163 self.url = path
2164 self.ui = ui
2165
2166 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
2167 if not m:
2168 raise RepoError("couldn't parse destination %s" % path)
2169
2170 self.user = m.group(2)
2171 self.host = m.group(3)
2172 self.port = m.group(5)
2173 self.path = m.group(7) or "."
2174
2175 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
2176 args = self.port and ("%s -p %s") % (args, self.port) or args
2177
2178 sshcmd = self.ui.config("ui", "ssh", "ssh")
2179 remotecmd = self.ui.config("ui", "remotecmd", "hg")
2180 cmd = "%s %s '%s -R %s serve --stdio'"
2181 cmd = cmd % (sshcmd, args, remotecmd, self.path)
2182
2183 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
2184
2185 def readerr(self):
2186 while 1:
2187 r,w,x = select.select([self.pipee], [], [], 0)
2188 if not r: break
2189 l = self.pipee.readline()
2190 if not l: break
2191 self.ui.status("remote: ", l)
2192
2193 def __del__(self):
2194 try:
2195 self.pipeo.close()
2196 self.pipei.close()
2197 for l in self.pipee:
2198 self.ui.status("remote: ", l)
2199 self.pipee.close()
2200 except:
2201 pass
2202
2203 def dev(self):
2204 return -1
2205
2206 def do_cmd(self, cmd, **args):
2207 self.ui.debug("sending %s command\n" % cmd)
2208 self.pipeo.write("%s\n" % cmd)
2209 for k, v in args.items():
2210 self.pipeo.write("%s %d\n" % (k, len(v)))
2211 self.pipeo.write(v)
2212 self.pipeo.flush()
2213
2214 return self.pipei
2215
2216 def call(self, cmd, **args):
2217 r = self.do_cmd(cmd, **args)
2218 l = r.readline()
2219 self.readerr()
2220 try:
2221 l = int(l)
2222 except:
2223 raise RepoError("unexpected response '%s'" % l)
2224 return r.read(l)
2225
2226 def lock(self):
2227 self.call("lock")
2228 return remotelock(self)
2229
2230 def unlock(self):
2231 self.call("unlock")
2232
2233 def heads(self):
2234 d = self.call("heads")
2235 try:
2236 return map(bin, d[:-1].split(" "))
2237 except:
2238 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2239
2240 def branches(self, nodes):
2241 n = " ".join(map(hex, nodes))
2242 d = self.call("branches", nodes=n)
2243 try:
2244 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2245 return br
2246 except:
2247 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2248
2249 def between(self, pairs):
2250 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2251 d = self.call("between", pairs=n)
2252 try:
2253 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2254 return p
2255 except:
2256 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2257
2258 def changegroup(self, nodes):
2259 n = " ".join(map(hex, nodes))
2260 f = self.do_cmd("changegroup", roots=n)
2261 return self.pipei
2262
2263 def addchangegroup(self, cg):
2264 d = self.call("addchangegroup")
2265 if d:
2266 raise RepoError("push refused: %s", d)
2267
2268 while 1:
2269 d = cg.read(4096)
2270 if not d: break
2271 self.pipeo.write(d)
2272 self.readerr()
2273
2274 self.pipeo.flush()
2275
2276 self.readerr()
2277 l = int(self.pipei.readline())
2278 return self.pipei.read(l) != ""
2279
2280 class httpsrepository(httprepository):
2281 pass
2282
2283 def repository(ui, path=None, create=0):
2284 if path:
2285 if path.startswith("http://"):
2286 return httprepository(ui, path)
2287 if path.startswith("https://"):
2288 return httpsrepository(ui, path)
2289 if path.startswith("hg://"):
2290 return httprepository(ui, path.replace("hg://", "http://"))
2291 if path.startswith("old-http://"):
2292 return localrepository(ui, path.replace("old-http://", "http://"))
2293 if path.startswith("ssh://"):
2294 return sshrepository(ui, path)
2295
2296 return localrepository(ui, path, create)
This diff has been collapsed as it changes many lines, (2132 lines changed) Show them Hide them
@@ -1,104 +1,14 b''
1 # hg.py - repository classes for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import sys, struct, os
8 import sys, struct
9 import util
10 from revlog import *
9 from revlog import *
11 from demandload import *
10 from demandload import *
12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
11 demandload(globals(), "bisect")
13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 demandload(globals(), "bisect errno select stat")
15
16 class filelog(revlog):
17 def __init__(self, opener, path):
18 revlog.__init__(self, opener,
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
25 return (path
26 .replace(".hg/", ".hg.hg/")
27 .replace(".i/", ".i.hg/")
28 .replace(".d/", ".d.hg/"))
29
30 def decodedir(self, path):
31 return (path
32 .replace(".d.hg/", ".d/")
33 .replace(".i.hg/", ".i/")
34 .replace(".hg.hg/", ".hg/"))
35
36 def read(self, node):
37 t = self.revision(node)
38 if not t.startswith('\1\n'):
39 return t
40 s = t.find('\1\n', 2)
41 return t[s+2:]
42
43 def readmeta(self, node):
44 t = self.revision(node)
45 if not t.startswith('\1\n'):
46 return t
47 s = t.find('\1\n', 2)
48 mt = t[2:s]
49 for l in mt.splitlines():
50 k, v = l.split(": ", 1)
51 m[k] = v
52 return m
53
54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 if meta or text.startswith('\1\n'):
56 mt = ""
57 if meta:
58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 return self.addrevision(text, transaction, link, p1, p2)
61
62 def annotate(self, node):
63
64 def decorate(text, rev):
65 return ([rev] * len(text.splitlines()), text)
66
67 def pair(parent, child):
68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 child[0][b1:b2] = parent[0][a1:a2]
70 return child
71
72 # find all ancestors
73 needed = {node:1}
74 visit = [node]
75 while visit:
76 n = visit.pop(0)
77 for p in self.parents(n):
78 if p not in needed:
79 needed[p] = 1
80 visit.append(p)
81 else:
82 # count how many times we'll use this
83 needed[p] += 1
84
85 # sort by revision which is a topological order
86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 visit.sort()
88 hist = {}
89
90 for r,n in visit:
91 curr = decorate(self.read(n), self.linkrev(n))
92 for p in self.parents(n):
93 if p != nullid:
94 curr = pair(hist[p], curr)
95 # trim the history of unneeded revs
96 needed[p] -= 1
97 if not needed[p]:
98 del hist[p]
99 hist[n] = curr
100
101 return zip(hist[n][0], hist[n][1].splitlines(1))
102
12
103 class manifest(revlog):
13 class manifest(revlog):
104 def __init__(self, opener):
14 def __init__(self, opener):
@@ -258,2039 +168,3 b' class manifest(revlog):'
258 self.addlist = None
168 self.addlist = None
259
169
260 return n
170 return n
261
262 class changelog(revlog):
263 def __init__(self, opener):
264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265
266 def extract(self, text):
267 if not text:
268 return (nullid, "", "0", [], "")
269 last = text.index("\n\n")
270 desc = text[last + 2:]
271 l = text[:last].splitlines()
272 manifest = bin(l[0])
273 user = l[1]
274 date = l[2]
275 if " " not in date:
276 date += " 0" # some tools used -d without a timezone
277 files = l[3:]
278 return (manifest, user, date, files, desc)
279
280 def read(self, node):
281 return self.extract(self.revision(node))
282
283 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
284 user=None, date=None):
285 if not date:
286 if time.daylight: offset = time.altzone
287 else: offset = time.timezone
288 date = "%d %d" % (time.time(), offset)
289 list.sort()
290 l = [hex(manifest), user, date] + list + ["", desc]
291 text = "\n".join(l)
292 return self.addrevision(text, transaction, self.count(), p1, p2)
293
294 class dirstate:
295 def __init__(self, opener, ui, root):
296 self.opener = opener
297 self.root = root
298 self.dirty = 0
299 self.ui = ui
300 self.map = None
301 self.pl = None
302 self.copies = {}
303 self.ignorefunc = None
304
305 def wjoin(self, f):
306 return os.path.join(self.root, f)
307
308 def getcwd(self):
309 cwd = os.getcwd()
310 if cwd == self.root: return ''
311 return cwd[len(self.root) + 1:]
312
313 def ignore(self, f):
314 if not self.ignorefunc:
315 bigpat = []
316 try:
317 l = file(self.wjoin(".hgignore"))
318 for pat in l:
319 p = pat.rstrip()
320 if p:
321 try:
322 re.compile(p)
323 except:
324 self.ui.warn("ignoring invalid ignore"
325 + " regular expression '%s'\n" % p)
326 else:
327 bigpat.append(p)
328 except IOError: pass
329
330 if bigpat:
331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 r = re.compile(s)
333 self.ignorefunc = r.search
334 else:
335 self.ignorefunc = util.never
336
337 return self.ignorefunc(f)
338
339 def __del__(self):
340 if self.dirty:
341 self.write()
342
343 def __getitem__(self, key):
344 try:
345 return self.map[key]
346 except TypeError:
347 self.read()
348 return self[key]
349
350 def __contains__(self, key):
351 if not self.map: self.read()
352 return key in self.map
353
354 def parents(self):
355 if not self.pl:
356 self.read()
357 return self.pl
358
359 def markdirty(self):
360 if not self.dirty:
361 self.dirty = 1
362
363 def setparents(self, p1, p2=nullid):
364 self.markdirty()
365 self.pl = p1, p2
366
367 def state(self, key):
368 try:
369 return self[key][0]
370 except KeyError:
371 return "?"
372
373 def read(self):
374 if self.map is not None: return self.map
375
376 self.map = {}
377 self.pl = [nullid, nullid]
378 try:
379 st = self.opener("dirstate").read()
380 if not st: return
381 except: return
382
383 self.pl = [st[:20], st[20: 40]]
384
385 pos = 40
386 while pos < len(st):
387 e = struct.unpack(">cllll", st[pos:pos+17])
388 l = e[4]
389 pos += 17
390 f = st[pos:pos + l]
391 if '\0' in f:
392 f, c = f.split('\0')
393 self.copies[f] = c
394 self.map[f] = e[:4]
395 pos += l
396
397 def copy(self, source, dest):
398 self.read()
399 self.markdirty()
400 self.copies[dest] = source
401
402 def copied(self, file):
403 return self.copies.get(file, None)
404
405 def update(self, files, state, **kw):
406 ''' current states:
407 n normal
408 m needs merging
409 r marked for removal
410 a marked for addition'''
411
412 if not files: return
413 self.read()
414 self.markdirty()
415 for f in files:
416 if state == "r":
417 self.map[f] = ('r', 0, 0, 0)
418 else:
419 s = os.stat(os.path.join(self.root, f))
420 st_size = kw.get('st_size', s.st_size)
421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423
424 def forget(self, files):
425 if not files: return
426 self.read()
427 self.markdirty()
428 for f in files:
429 try:
430 del self.map[f]
431 except KeyError:
432 self.ui.warn("not in dirstate: %s!\n" % f)
433 pass
434
435 def clear(self):
436 self.map = {}
437 self.markdirty()
438
439 def write(self):
440 st = self.opener("dirstate", "w")
441 st.write("".join(self.pl))
442 for f, e in self.map.items():
443 c = self.copied(f)
444 if c:
445 f = f + "\0" + c
446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 st.write(e + f)
448 self.dirty = 0
449
450 def filterfiles(self, files):
451 ret = {}
452 unknown = []
453
454 for x in files:
455 if x is '.':
456 return self.map.copy()
457 if x not in self.map:
458 unknown.append(x)
459 else:
460 ret[x] = self.map[x]
461
462 if not unknown:
463 return ret
464
465 b = self.map.keys()
466 b.sort()
467 blen = len(b)
468
469 for x in unknown:
470 bs = bisect.bisect(b, x)
471 if bs != 0 and b[bs-1] == x:
472 ret[x] = self.map[x]
473 continue
474 while bs < blen:
475 s = b[bs]
476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 ret[s] = self.map[s]
478 else:
479 break
480 bs += 1
481 return ret
482
483 def walk(self, files=None, match=util.always, dc=None):
484 self.read()
485
486 # walk all files by default
487 if not files:
488 files = [self.root]
489 if not dc:
490 dc = self.map.copy()
491 elif not dc:
492 dc = self.filterfiles(files)
493
494 known = {'.hg': 1}
495 def seen(fn):
496 if fn in known: return True
497 known[fn] = 1
498 def traverse():
499 for ff in util.unique(files):
500 f = os.path.join(self.root, ff)
501 try:
502 st = os.stat(f)
503 except OSError, inst:
504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 util.pathto(self.getcwd(), ff),
506 inst.strerror))
507 continue
508 if stat.S_ISDIR(st.st_mode):
509 for dir, subdirs, fl in os.walk(f):
510 d = dir[len(self.root) + 1:]
511 nd = util.normpath(d)
512 if nd == '.': nd = ''
513 if seen(nd):
514 subdirs[:] = []
515 continue
516 for sd in subdirs:
517 ds = os.path.join(nd, sd +'/')
518 if self.ignore(ds) or not match(ds):
519 subdirs.remove(sd)
520 subdirs.sort()
521 fl.sort()
522 for fn in fl:
523 fn = util.pconvert(os.path.join(d, fn))
524 yield 'f', fn
525 elif stat.S_ISREG(st.st_mode):
526 yield 'f', ff
527 else:
528 kind = 'unknown'
529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 util.pathto(self.getcwd(), ff),
536 kind))
537
538 ks = dc.keys()
539 ks.sort()
540 for k in ks:
541 yield 'm', k
542
543 # yield only files that match: all in dirstate, others only if
544 # not in .hgignore
545
546 for src, fn in util.unique(traverse()):
547 fn = util.normpath(fn)
548 if seen(fn): continue
549 if fn not in dc and self.ignore(fn):
550 continue
551 if match(fn):
552 yield src, fn
553
554 def changes(self, files=None, match=util.always):
555 self.read()
556 if not files:
557 dc = self.map.copy()
558 else:
559 dc = self.filterfiles(files)
560 lookup, modified, added, unknown = [], [], [], []
561 removed, deleted = [], []
562
563 for src, fn in self.walk(files, match, dc=dc):
564 try:
565 s = os.stat(os.path.join(self.root, fn))
566 except OSError:
567 continue
568 if not stat.S_ISREG(s.st_mode):
569 continue
570 c = dc.get(fn)
571 if c:
572 del dc[fn]
573 if c[0] == 'm':
574 modified.append(fn)
575 elif c[0] == 'a':
576 added.append(fn)
577 elif c[0] == 'r':
578 unknown.append(fn)
579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 modified.append(fn)
581 elif c[3] != s.st_mtime:
582 lookup.append(fn)
583 else:
584 unknown.append(fn)
585
586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 if c[0] == 'r':
588 removed.append(fn)
589 else:
590 deleted.append(fn)
591 return (lookup, modified, added, removed + deleted, unknown)
592
593 # used to avoid circular references so destructors work
594 def opener(base):
595 p = base
596 def o(path, mode="r"):
597 if p.startswith("http://"):
598 f = os.path.join(p, urllib.quote(path))
599 return httprangereader.httprangereader(f)
600
601 f = os.path.join(p, path)
602
603 mode += "b" # for that other OS
604
605 if mode[0] != "r":
606 try:
607 s = os.stat(f)
608 except OSError:
609 d = os.path.dirname(f)
610 if not os.path.isdir(d):
611 os.makedirs(d)
612 else:
613 if s.st_nlink > 1:
614 file(f + ".tmp", "wb").write(file(f, "rb").read())
615 util.rename(f+".tmp", f)
616
617 return file(f, mode)
618
619 return o
620
621 class RepoError(Exception): pass
622
623 class localrepository:
624 def __init__(self, ui, path=None, create=0):
625 self.remote = 0
626 if path and path.startswith("http://"):
627 self.remote = 1
628 self.path = path
629 else:
630 if not path:
631 p = os.getcwd()
632 while not os.path.isdir(os.path.join(p, ".hg")):
633 oldp = p
634 p = os.path.dirname(p)
635 if p == oldp: raise RepoError("no repo found")
636 path = p
637 self.path = os.path.join(path, ".hg")
638
639 if not create and not os.path.isdir(self.path):
640 raise RepoError("repository %s not found" % self.path)
641
642 self.root = os.path.abspath(path)
643 self.ui = ui
644
645 if create:
646 os.mkdir(self.path)
647 os.mkdir(self.join("data"))
648
649 self.opener = opener(self.path)
650 self.wopener = opener(self.root)
651 self.manifest = manifest(self.opener)
652 self.changelog = changelog(self.opener)
653 self.tagscache = None
654 self.nodetagscache = None
655
656 if not self.remote:
657 self.dirstate = dirstate(self.opener, ui, self.root)
658 try:
659 self.ui.readconfig(self.opener("hgrc"))
660 except IOError: pass
661
662 def hook(self, name, **args):
663 s = self.ui.config("hooks", name)
664 if s:
665 self.ui.note("running hook %s: %s\n" % (name, s))
666 old = {}
667 for k, v in args.items():
668 k = k.upper()
669 old[k] = os.environ.get(k, None)
670 os.environ[k] = v
671
672 r = os.system(s)
673
674 for k, v in old.items():
675 if v != None:
676 os.environ[k] = v
677 else:
678 del os.environ[k]
679
680 if r:
681 self.ui.warn("abort: %s hook failed with status %d!\n" %
682 (name, r))
683 return False
684 return True
685
686 def tags(self):
687 '''return a mapping of tag to node'''
688 if not self.tagscache:
689 self.tagscache = {}
690 def addtag(self, k, n):
691 try:
692 bin_n = bin(n)
693 except TypeError:
694 bin_n = ''
695 self.tagscache[k.strip()] = bin_n
696
697 try:
698 # read each head of the tags file, ending with the tip
699 # and add each tag found to the map, with "newer" ones
700 # taking precedence
701 fl = self.file(".hgtags")
702 h = fl.heads()
703 h.reverse()
704 for r in h:
705 for l in fl.read(r).splitlines():
706 if l:
707 n, k = l.split(" ", 1)
708 addtag(self, k, n)
709 except KeyError:
710 pass
711
712 try:
713 f = self.opener("localtags")
714 for l in f:
715 n, k = l.split(" ", 1)
716 addtag(self, k, n)
717 except IOError:
718 pass
719
720 self.tagscache['tip'] = self.changelog.tip()
721
722 return self.tagscache
723
724 def tagslist(self):
725 '''return a list of tags ordered by revision'''
726 l = []
727 for t, n in self.tags().items():
728 try:
729 r = self.changelog.rev(n)
730 except:
731 r = -2 # sort to the beginning of the list if unknown
732 l.append((r,t,n))
733 l.sort()
734 return [(t,n) for r,t,n in l]
735
736 def nodetags(self, node):
737 '''return the tags associated with a node'''
738 if not self.nodetagscache:
739 self.nodetagscache = {}
740 for t,n in self.tags().items():
741 self.nodetagscache.setdefault(n,[]).append(t)
742 return self.nodetagscache.get(node, [])
743
744 def lookup(self, key):
745 try:
746 return self.tags()[key]
747 except KeyError:
748 try:
749 return self.changelog.lookup(key)
750 except:
751 raise RepoError("unknown revision '%s'" % key)
752
753 def dev(self):
754 if self.remote: return -1
755 return os.stat(self.path).st_dev
756
757 def local(self):
758 return not self.remote
759
760 def join(self, f):
761 return os.path.join(self.path, f)
762
763 def wjoin(self, f):
764 return os.path.join(self.root, f)
765
766 def file(self, f):
767 if f[0] == '/': f = f[1:]
768 return filelog(self.opener, f)
769
770 def getcwd(self):
771 return self.dirstate.getcwd()
772
773 def wfile(self, f, mode='r'):
774 return self.wopener(f, mode)
775
776 def wread(self, filename):
777 return self.wopener(filename, 'r').read()
778
779 def wwrite(self, filename, data, fd=None):
780 if fd:
781 return fd.write(data)
782 return self.wopener(filename, 'w').write(data)
783
784 def transaction(self):
785 # save dirstate for undo
786 try:
787 ds = self.opener("dirstate").read()
788 except IOError:
789 ds = ""
790 self.opener("journal.dirstate", "w").write(ds)
791
792 def after():
793 util.rename(self.join("journal"), self.join("undo"))
794 util.rename(self.join("journal.dirstate"),
795 self.join("undo.dirstate"))
796
797 return transaction.transaction(self.ui.warn, self.opener,
798 self.join("journal"), after)
799
800 def recover(self):
801 lock = self.lock()
802 if os.path.exists(self.join("journal")):
803 self.ui.status("rolling back interrupted transaction\n")
804 return transaction.rollback(self.opener, self.join("journal"))
805 else:
806 self.ui.warn("no interrupted transaction available\n")
807
808 def undo(self):
809 lock = self.lock()
810 if os.path.exists(self.join("undo")):
811 self.ui.status("rolling back last transaction\n")
812 transaction.rollback(self.opener, self.join("undo"))
813 self.dirstate = None
814 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
815 self.dirstate = dirstate(self.opener, self.ui, self.root)
816 else:
817 self.ui.warn("no undo information available\n")
818
819 def lock(self, wait=1):
820 try:
821 return lock.lock(self.join("lock"), 0)
822 except lock.LockHeld, inst:
823 if wait:
824 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
825 return lock.lock(self.join("lock"), wait)
826 raise inst
827
828 def rawcommit(self, files, text, user, date, p1=None, p2=None):
829 orig_parent = self.dirstate.parents()[0] or nullid
830 p1 = p1 or self.dirstate.parents()[0] or nullid
831 p2 = p2 or self.dirstate.parents()[1] or nullid
832 c1 = self.changelog.read(p1)
833 c2 = self.changelog.read(p2)
834 m1 = self.manifest.read(c1[0])
835 mf1 = self.manifest.readflags(c1[0])
836 m2 = self.manifest.read(c2[0])
837 changed = []
838
839 if orig_parent == p1:
840 update_dirstate = 1
841 else:
842 update_dirstate = 0
843
844 tr = self.transaction()
845 mm = m1.copy()
846 mfm = mf1.copy()
847 linkrev = self.changelog.count()
848 for f in files:
849 try:
850 t = self.wread(f)
851 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
852 r = self.file(f)
853 mfm[f] = tm
854
855 fp1 = m1.get(f, nullid)
856 fp2 = m2.get(f, nullid)
857
858 # is the same revision on two branches of a merge?
859 if fp2 == fp1:
860 fp2 = nullid
861
862 if fp2 != nullid:
863 # is one parent an ancestor of the other?
864 fpa = r.ancestor(fp1, fp2)
865 if fpa == fp1:
866 fp1, fp2 = fp2, nullid
867 elif fpa == fp2:
868 fp2 = nullid
869
870 # is the file unmodified from the parent?
871 if t == r.read(fp1):
872 # record the proper existing parent in manifest
873 # no need to add a revision
874 mm[f] = fp1
875 continue
876
877 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
878 changed.append(f)
879 if update_dirstate:
880 self.dirstate.update([f], "n")
881 except IOError:
882 try:
883 del mm[f]
884 del mfm[f]
885 if update_dirstate:
886 self.dirstate.forget([f])
887 except:
888 # deleted from p2?
889 pass
890
891 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
892 user = user or self.ui.username()
893 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
894 tr.close()
895 if update_dirstate:
896 self.dirstate.setparents(n, nullid)
897
898 def commit(self, files = None, text = "", user = None, date = None,
899 match = util.always, force=False):
900 commit = []
901 remove = []
902 changed = []
903
904 if files:
905 for f in files:
906 s = self.dirstate.state(f)
907 if s in 'nmai':
908 commit.append(f)
909 elif s == 'r':
910 remove.append(f)
911 else:
912 self.ui.warn("%s not tracked!\n" % f)
913 else:
914 (c, a, d, u) = self.changes(match=match)
915 commit = c + a
916 remove = d
917
918 p1, p2 = self.dirstate.parents()
919 c1 = self.changelog.read(p1)
920 c2 = self.changelog.read(p2)
921 m1 = self.manifest.read(c1[0])
922 mf1 = self.manifest.readflags(c1[0])
923 m2 = self.manifest.read(c2[0])
924
925 if not commit and not remove and not force and p2 == nullid:
926 self.ui.status("nothing changed\n")
927 return None
928
929 if not self.hook("precommit"):
930 return None
931
932 lock = self.lock()
933 tr = self.transaction()
934
935 # check in files
936 new = {}
937 linkrev = self.changelog.count()
938 commit.sort()
939 for f in commit:
940 self.ui.note(f + "\n")
941 try:
942 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
943 t = self.wread(f)
944 except IOError:
945 self.ui.warn("trouble committing %s!\n" % f)
946 raise
947
948 meta = {}
949 cp = self.dirstate.copied(f)
950 if cp:
951 meta["copy"] = cp
952 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
953 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
954
955 r = self.file(f)
956 fp1 = m1.get(f, nullid)
957 fp2 = m2.get(f, nullid)
958
959 # is the same revision on two branches of a merge?
960 if fp2 == fp1:
961 fp2 = nullid
962
963 if fp2 != nullid:
964 # is one parent an ancestor of the other?
965 fpa = r.ancestor(fp1, fp2)
966 if fpa == fp1:
967 fp1, fp2 = fp2, nullid
968 elif fpa == fp2:
969 fp2 = nullid
970
971 # is the file unmodified from the parent?
972 if not meta and t == r.read(fp1):
973 # record the proper existing parent in manifest
974 # no need to add a revision
975 new[f] = fp1
976 continue
977
978 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
979 # remember what we've added so that we can later calculate
980 # the files to pull from a set of changesets
981 changed.append(f)
982
983 # update manifest
984 m1.update(new)
985 for f in remove:
986 if f in m1:
987 del m1[f]
988 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
989 (new, remove))
990
991 # add changeset
992 new = new.keys()
993 new.sort()
994
995 if not text:
996 edittext = ""
997 if p2 != nullid:
998 edittext += "HG: branch merge\n"
999 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
1000 edittext += "".join(["HG: changed %s\n" % f for f in changed])
1001 edittext += "".join(["HG: removed %s\n" % f for f in remove])
1002 if not changed and not remove:
1003 edittext += "HG: no files changed\n"
1004 edittext = self.ui.edit(edittext)
1005 if not edittext.rstrip():
1006 return None
1007 text = edittext
1008
1009 user = user or self.ui.username()
1010 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
1011 tr.close()
1012
1013 self.dirstate.setparents(n)
1014 self.dirstate.update(new, "n")
1015 self.dirstate.forget(remove)
1016
1017 if not self.hook("commit", node=hex(n)):
1018 return None
1019 return n
1020
1021 def walk(self, node=None, files=[], match=util.always):
1022 if node:
1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 if match(fn): yield 'm', fn
1025 else:
1026 for src, fn in self.dirstate.walk(files, match):
1027 yield src, fn
1028
1029 def changes(self, node1 = None, node2 = None, files = [],
1030 match = util.always):
1031 mf2, u = None, []
1032
1033 def fcmp(fn, mf):
1034 t1 = self.wread(fn)
1035 t2 = self.file(fn).read(mf.get(fn, nullid))
1036 return cmp(t1, t2)
1037
1038 def mfmatches(node):
1039 mf = dict(self.manifest.read(node))
1040 for fn in mf.keys():
1041 if not match(fn):
1042 del mf[fn]
1043 return mf
1044
1045 # are we comparing the working directory?
1046 if not node2:
1047 l, c, a, d, u = self.dirstate.changes(files, match)
1048
1049 # are we comparing working dir against its parent?
1050 if not node1:
1051 if l:
1052 # do a full compare of any files that might have changed
1053 change = self.changelog.read(self.dirstate.parents()[0])
1054 mf2 = mfmatches(change[0])
1055 for f in l:
1056 if fcmp(f, mf2):
1057 c.append(f)
1058
1059 for l in c, a, d, u:
1060 l.sort()
1061
1062 return (c, a, d, u)
1063
1064 # are we comparing working dir against non-tip?
1065 # generate a pseudo-manifest for the working dir
1066 if not node2:
1067 if not mf2:
1068 change = self.changelog.read(self.dirstate.parents()[0])
1069 mf2 = mfmatches(change[0])
1070 for f in a + c + l:
1071 mf2[f] = ""
1072 for f in d:
1073 if f in mf2: del mf2[f]
1074 else:
1075 change = self.changelog.read(node2)
1076 mf2 = mfmatches(change[0])
1077
1078 # flush lists from dirstate before comparing manifests
1079 c, a = [], []
1080
1081 change = self.changelog.read(node1)
1082 mf1 = mfmatches(change[0])
1083
1084 for fn in mf2:
1085 if mf1.has_key(fn):
1086 if mf1[fn] != mf2[fn]:
1087 if mf2[fn] != "" or fcmp(fn, mf1):
1088 c.append(fn)
1089 del mf1[fn]
1090 else:
1091 a.append(fn)
1092
1093 d = mf1.keys()
1094
1095 for l in c, a, d, u:
1096 l.sort()
1097
1098 return (c, a, d, u)
1099
1100 def add(self, list):
1101 for f in list:
1102 p = self.wjoin(f)
1103 if not os.path.exists(p):
1104 self.ui.warn("%s does not exist!\n" % f)
1105 elif not os.path.isfile(p):
1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 elif self.dirstate.state(f) in 'an':
1108 self.ui.warn("%s already tracked!\n" % f)
1109 else:
1110 self.dirstate.update([f], "a")
1111
1112 def forget(self, list):
1113 for f in list:
1114 if self.dirstate.state(f) not in 'ai':
1115 self.ui.warn("%s not added!\n" % f)
1116 else:
1117 self.dirstate.forget([f])
1118
1119 def remove(self, list):
1120 for f in list:
1121 p = self.wjoin(f)
1122 if os.path.exists(p):
1123 self.ui.warn("%s still exists!\n" % f)
1124 elif self.dirstate.state(f) == 'a':
1125 self.ui.warn("%s never committed!\n" % f)
1126 self.dirstate.forget([f])
1127 elif f not in self.dirstate:
1128 self.ui.warn("%s not tracked!\n" % f)
1129 else:
1130 self.dirstate.update([f], "r")
1131
1132 def copy(self, source, dest):
1133 p = self.wjoin(dest)
1134 if not os.path.exists(p):
1135 self.ui.warn("%s does not exist!\n" % dest)
1136 elif not os.path.isfile(p):
1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 else:
1139 if self.dirstate.state(dest) == '?':
1140 self.dirstate.update([dest], "a")
1141 self.dirstate.copy(source, dest)
1142
1143 def heads(self):
1144 return self.changelog.heads()
1145
1146 # branchlookup returns a dict giving a list of branches for
1147 # each head. A branch is defined as the tag of a node or
1148 # the branch of the node's parents. If a node has multiple
1149 # branch tags, tags are eliminated if they are visible from other
1150 # branch tags.
1151 #
1152 # So, for this graph: a->b->c->d->e
1153 # \ /
1154 # aa -----/
1155 # a has tag 2.6.12
1156 # d has tag 2.6.13
1157 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1158 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1159 # from the list.
1160 #
1161 # It is possible that more than one head will have the same branch tag.
1162 # callers need to check the result for multiple heads under the same
1163 # branch tag if that is a problem for them (ie checkout of a specific
1164 # branch).
1165 #
1166 # passing in a specific branch will limit the depth of the search
1167 # through the parents. It won't limit the branches returned in the
1168 # result though.
1169 def branchlookup(self, heads=None, branch=None):
1170 if not heads:
1171 heads = self.heads()
1172 headt = [ h for h in heads ]
1173 chlog = self.changelog
1174 branches = {}
1175 merges = []
1176 seenmerge = {}
1177
1178 # traverse the tree once for each head, recording in the branches
1179 # dict which tags are visible from this head. The branches
1180 # dict also records which tags are visible from each tag
1181 # while we traverse.
1182 while headt or merges:
1183 if merges:
1184 n, found = merges.pop()
1185 visit = [n]
1186 else:
1187 h = headt.pop()
1188 visit = [h]
1189 found = [h]
1190 seen = {}
1191 while visit:
1192 n = visit.pop()
1193 if n in seen:
1194 continue
1195 pp = chlog.parents(n)
1196 tags = self.nodetags(n)
1197 if tags:
1198 for x in tags:
1199 if x == 'tip':
1200 continue
1201 for f in found:
1202 branches.setdefault(f, {})[n] = 1
1203 branches.setdefault(n, {})[n] = 1
1204 break
1205 if n not in found:
1206 found.append(n)
1207 if branch in tags:
1208 continue
1209 seen[n] = 1
1210 if pp[1] != nullid and n not in seenmerge:
1211 merges.append((pp[1], [x for x in found]))
1212 seenmerge[n] = 1
1213 if pp[0] != nullid:
1214 visit.append(pp[0])
1215 # traverse the branches dict, eliminating branch tags from each
1216 # head that are visible from another branch tag for that head.
1217 out = {}
1218 viscache = {}
1219 for h in heads:
1220 def visible(node):
1221 if node in viscache:
1222 return viscache[node]
1223 ret = {}
1224 visit = [node]
1225 while visit:
1226 x = visit.pop()
1227 if x in viscache:
1228 ret.update(viscache[x])
1229 elif x not in ret:
1230 ret[x] = 1
1231 if x in branches:
1232 visit[len(visit):] = branches[x].keys()
1233 viscache[node] = ret
1234 return ret
1235 if h not in branches:
1236 continue
1237 # O(n^2), but somewhat limited. This only searches the
1238 # tags visible from a specific head, not all the tags in the
1239 # whole repo.
1240 for b in branches[h]:
1241 vis = False
1242 for bb in branches[h].keys():
1243 if b != bb:
1244 if b in visible(bb):
1245 vis = True
1246 break
1247 if not vis:
1248 l = out.setdefault(h, [])
1249 l[len(l):] = self.nodetags(b)
1250 return out
1251
1252 def branches(self, nodes):
1253 if not nodes: nodes = [self.changelog.tip()]
1254 b = []
1255 for n in nodes:
1256 t = n
1257 while n:
1258 p = self.changelog.parents(n)
1259 if p[1] != nullid or p[0] == nullid:
1260 b.append((t, n, p[0], p[1]))
1261 break
1262 n = p[0]
1263 return b
1264
1265 def between(self, pairs):
1266 r = []
1267
1268 for top, bottom in pairs:
1269 n, l, i = top, [], 0
1270 f = 1
1271
1272 while n != bottom:
1273 p = self.changelog.parents(n)[0]
1274 if i == f:
1275 l.append(n)
1276 f = f * 2
1277 n = p
1278 i += 1
1279
1280 r.append(l)
1281
1282 return r
1283
1284 def newer(self, nodes):
1285 m = {}
1286 nl = []
1287 pm = {}
1288 cl = self.changelog
1289 t = l = cl.count()
1290
1291 # find the lowest numbered node
1292 for n in nodes:
1293 l = min(l, cl.rev(n))
1294 m[n] = 1
1295
1296 for i in xrange(l, t):
1297 n = cl.node(i)
1298 if n in m: # explicitly listed
1299 pm[n] = 1
1300 nl.append(n)
1301 continue
1302 for p in cl.parents(n):
1303 if p in pm: # parent listed
1304 pm[n] = 1
1305 nl.append(n)
1306 break
1307
1308 return nl
1309
1310 def findincoming(self, remote, base=None, heads=None):
1311 m = self.changelog.nodemap
1312 search = []
1313 fetch = {}
1314 seen = {}
1315 seenbranch = {}
1316 if base == None:
1317 base = {}
1318
1319 # assume we're closer to the tip than the root
1320 # and start by examining the heads
1321 self.ui.status("searching for changes\n")
1322
1323 if not heads:
1324 heads = remote.heads()
1325
1326 unknown = []
1327 for h in heads:
1328 if h not in m:
1329 unknown.append(h)
1330 else:
1331 base[h] = 1
1332
1333 if not unknown:
1334 return None
1335
1336 rep = {}
1337 reqcnt = 0
1338
1339 # search through remote branches
1340 # a 'branch' here is a linear segment of history, with four parts:
1341 # head, root, first parent, second parent
1342 # (a branch always has two parents (or none) by definition)
1343 unknown = remote.branches(unknown)
1344 while unknown:
1345 r = []
1346 while unknown:
1347 n = unknown.pop(0)
1348 if n[0] in seen:
1349 continue
1350
1351 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1352 if n[0] == nullid:
1353 break
1354 if n in seenbranch:
1355 self.ui.debug("branch already found\n")
1356 continue
1357 if n[1] and n[1] in m: # do we know the base?
1358 self.ui.debug("found incomplete branch %s:%s\n"
1359 % (short(n[0]), short(n[1])))
1360 search.append(n) # schedule branch range for scanning
1361 seenbranch[n] = 1
1362 else:
1363 if n[1] not in seen and n[1] not in fetch:
1364 if n[2] in m and n[3] in m:
1365 self.ui.debug("found new changeset %s\n" %
1366 short(n[1]))
1367 fetch[n[1]] = 1 # earliest unknown
1368 base[n[2]] = 1 # latest known
1369 continue
1370
1371 for a in n[2:4]:
1372 if a not in rep:
1373 r.append(a)
1374 rep[a] = 1
1375
1376 seen[n[0]] = 1
1377
1378 if r:
1379 reqcnt += 1
1380 self.ui.debug("request %d: %s\n" %
1381 (reqcnt, " ".join(map(short, r))))
1382 for p in range(0, len(r), 10):
1383 for b in remote.branches(r[p:p+10]):
1384 self.ui.debug("received %s:%s\n" %
1385 (short(b[0]), short(b[1])))
1386 if b[0] in m:
1387 self.ui.debug("found base node %s\n" % short(b[0]))
1388 base[b[0]] = 1
1389 elif b[0] not in seen:
1390 unknown.append(b)
1391
1392 # do binary search on the branches we found
1393 while search:
1394 n = search.pop(0)
1395 reqcnt += 1
1396 l = remote.between([(n[0], n[1])])[0]
1397 l.append(n[1])
1398 p = n[0]
1399 f = 1
1400 for i in l:
1401 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1402 if i in m:
1403 if f <= 2:
1404 self.ui.debug("found new branch changeset %s\n" %
1405 short(p))
1406 fetch[p] = 1
1407 base[i] = 1
1408 else:
1409 self.ui.debug("narrowed branch search to %s:%s\n"
1410 % (short(p), short(i)))
1411 search.append((p, i))
1412 break
1413 p, f = i, f * 2
1414
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1417 if f in m:
1418 raise RepoError("already have changeset " + short(f[:4]))
1419
1420 if base.keys() == [nullid]:
1421 self.ui.warn("warning: pulling from an unrelated repository!\n")
1422
1423 self.ui.note("found new changesets starting at " +
1424 " ".join([short(f) for f in fetch]) + "\n")
1425
1426 self.ui.debug("%d total queries\n" % reqcnt)
1427
1428 return fetch.keys()
1429
1430 def findoutgoing(self, remote, base=None, heads=None):
1431 if base == None:
1432 base = {}
1433 self.findincoming(remote, base, heads)
1434
1435 self.ui.debug("common changesets up to "
1436 + " ".join(map(short, base.keys())) + "\n")
1437
1438 remain = dict.fromkeys(self.changelog.nodemap)
1439
1440 # prune everything remote has from the tree
1441 del remain[nullid]
1442 remove = base.keys()
1443 while remove:
1444 n = remove.pop(0)
1445 if n in remain:
1446 del remain[n]
1447 for p in self.changelog.parents(n):
1448 remove.append(p)
1449
1450 # find every node whose parents have been pruned
1451 subset = []
1452 for n in remain:
1453 p1, p2 = self.changelog.parents(n)
1454 if p1 not in remain and p2 not in remain:
1455 subset.append(n)
1456
1457 # this is the set of all roots we have to push
1458 return subset
1459
1460 def pull(self, remote):
1461 lock = self.lock()
1462
1463 # if we have an empty repo, fetch everything
1464 if self.changelog.tip() == nullid:
1465 self.ui.status("requesting all changes\n")
1466 fetch = [nullid]
1467 else:
1468 fetch = self.findincoming(remote)
1469
1470 if not fetch:
1471 self.ui.status("no changes found\n")
1472 return 1
1473
1474 cg = remote.changegroup(fetch)
1475 return self.addchangegroup(cg)
1476
1477 def push(self, remote, force=False):
1478 lock = remote.lock()
1479
1480 base = {}
1481 heads = remote.heads()
1482 inc = self.findincoming(remote, base, heads)
1483 if not force and inc:
1484 self.ui.warn("abort: unsynced remote changes!\n")
1485 self.ui.status("(did you forget to sync? use push -f to force)\n")
1486 return 1
1487
1488 update = self.findoutgoing(remote, base)
1489 if not update:
1490 self.ui.status("no changes found\n")
1491 return 1
1492 elif not force:
1493 if len(heads) < len(self.changelog.heads()):
1494 self.ui.warn("abort: push creates new remote branches!\n")
1495 self.ui.status("(did you forget to merge?" +
1496 " use push -f to force)\n")
1497 return 1
1498
1499 cg = self.changegroup(update)
1500 return remote.addchangegroup(cg)
1501
    def changegroup(self, basenodes):
        """Build a changegroup stream for all changesets newer than basenodes.

        Returns a file-like object (supporting read(l)) that lazily
        produces the changegroup: the changelog group, the manifest
        group, then one group per changed file -- each preceded by a
        length-framed filename chunk -- terminated by a zero-length chunk.
        """
        class genread:
            # adapts the gengroup generator below to a read(l) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                self.buf += "".join(self.g)

            def read(self, l):
                # pull chunks from the generator until l bytes buffered
                # (or the generator is exhausted)
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map (changelog rev -> node)
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # frame length includes the 4-byte length word itself
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # zero-length chunk marks the end of the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1550
    def addchangegroup(self, source):
        """Apply a changegroup read from the file-like object *source*.

        Adds the changesets, manifests and file revisions it contains
        inside a single transaction, then fires the "changegroup" hook.
        Returns 1 if the hook fails, None otherwise.
        """

        def getchunk():
            # chunks are framed with a 4-byte big-endian length that
            # counts the frame itself; <= 4 means end of group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the empty terminator
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            # an incoming changeset's linkrev is its own (new) revision
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            # manifests and file revisions link back to their changeset
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # head count before/after tells us how many heads the pull added
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files: alternating filename chunk + revision group
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1618
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset *node*.

        allow permits a cross-branch (merge) update, force overrides
        local state, choose optionally filters which files are
        considered, and moddirstate controls whether the dirstate is
        rewritten to match.  Returns 1 on abort, None on success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        # p1: current working dir parent, p2: update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # changed, added, deleted, unknown files in the working dir
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge
        get = {}        # files to fetch from the target revision
        remove = []     # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file is neither in the target nor the ancestor
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files in the target but no longer in the working manifest
        # (the matched ones were deleted from m2 above)
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: fetch the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # missing parent directory -- create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1852
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory

        Writes the base and other revisions of fn to temp files, invokes
        the external merge tool (HGMERGE env var, ui.merge config, or
        "hgmerge") on the working copy, and warns on failure.
        """

        def temp(prefix, node):
            # materialize revision *node* of fn in a uniquely-named temp file
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: other %s ancestor %s\n" %
                      (fn, short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        # NOTE(review): the command and file names are interpolated into a
        # shell command line unquoted -- file names containing shell
        # metacharacters are unsafe here
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        os.unlink(b)
        os.unlink(c)
1882
1883 def verify(self):
1884 filelinkrevs = {}
1885 filenodes = {}
1886 changesets = revisions = files = 0
1887 errors = 0
1888
1889 seen = {}
1890 self.ui.status("checking changesets\n")
1891 for i in range(self.changelog.count()):
1892 changesets += 1
1893 n = self.changelog.node(i)
1894 if n in seen:
1895 self.ui.warn("duplicate changeset at revision %d\n" % i)
1896 errors += 1
1897 seen[n] = 1
1898
1899 for p in self.changelog.parents(n):
1900 if p not in self.changelog.nodemap:
1901 self.ui.warn("changeset %s has unknown parent %s\n" %
1902 (short(n), short(p)))
1903 errors += 1
1904 try:
1905 changes = self.changelog.read(n)
1906 except Exception, inst:
1907 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1908 errors += 1
1909
1910 for f in changes[3]:
1911 filelinkrevs.setdefault(f, []).append(i)
1912
1913 seen = {}
1914 self.ui.status("checking manifests\n")
1915 for i in range(self.manifest.count()):
1916 n = self.manifest.node(i)
1917 if n in seen:
1918 self.ui.warn("duplicate manifest at revision %d\n" % i)
1919 errors += 1
1920 seen[n] = 1
1921
1922 for p in self.manifest.parents(n):
1923 if p not in self.manifest.nodemap:
1924 self.ui.warn("manifest %s has unknown parent %s\n" %
1925 (short(n), short(p)))
1926 errors += 1
1927
1928 try:
1929 delta = mdiff.patchtext(self.manifest.delta(n))
1930 except KeyboardInterrupt:
1931 self.ui.warn("aborted")
1932 sys.exit(0)
1933 except Exception, inst:
1934 self.ui.warn("unpacking manifest %s: %s\n"
1935 % (short(n), inst))
1936 errors += 1
1937
1938 ff = [ l.split('\0') for l in delta.splitlines() ]
1939 for f, fn in ff:
1940 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1941
1942 self.ui.status("crosschecking files in changesets and manifests\n")
1943 for f in filenodes:
1944 if f not in filelinkrevs:
1945 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1946 errors += 1
1947
1948 for f in filelinkrevs:
1949 if f not in filenodes:
1950 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1951 errors += 1
1952
1953 self.ui.status("checking files\n")
1954 ff = filenodes.keys()
1955 ff.sort()
1956 for f in ff:
1957 if f == "/dev/null": continue
1958 files += 1
1959 fl = self.file(f)
1960 nodes = { nullid: 1 }
1961 seen = {}
1962 for i in range(fl.count()):
1963 revisions += 1
1964 n = fl.node(i)
1965
1966 if n in seen:
1967 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1968 errors += 1
1969
1970 if n not in filenodes[f]:
1971 self.ui.warn("%s: %d:%s not in manifests\n"
1972 % (f, i, short(n)))
1973 errors += 1
1974 else:
1975 del filenodes[f][n]
1976
1977 flr = fl.linkrev(n)
1978 if flr not in filelinkrevs[f]:
1979 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1980 % (f, short(n), fl.linkrev(n)))
1981 errors += 1
1982 else:
1983 filelinkrevs[f].remove(flr)
1984
1985 # verify contents
1986 try:
1987 t = fl.read(n)
1988 except Exception, inst:
1989 self.ui.warn("unpacking file %s %s: %s\n"
1990 % (f, short(n), inst))
1991 errors += 1
1992
1993 # verify parents
1994 (p1, p2) = fl.parents(n)
1995 if p1 not in nodes:
1996 self.ui.warn("file %s:%s unknown parent 1 %s" %
1997 (f, short(n), short(p1)))
1998 errors += 1
1999 if p2 not in nodes:
2000 self.ui.warn("file %s:%s unknown parent 2 %s" %
2001 (f, short(n), short(p1)))
2002 errors += 1
2003 nodes[n] = 1
2004
2005 # cross-check
2006 for node in filenodes[f]:
2007 self.ui.warn("node %s in manifests not in %s\n"
2008 % (hex(node), f))
2009 errors += 1
2010
2011 self.ui.status("%d files, %d changesets, %d total revisions\n" %
2012 (files, changesets, revisions))
2013
2014 if errors:
2015 self.ui.warn("%d integrity errors encountered!\n" % errors)
2016 return 1
2017
class remoterepository:
    """Marker base class for repositories accessed over the network."""

    def local(self):
        """Network repositories are never local."""
        return False
2021
class httprepository(remoterepository):
    """A repository accessed over HTTP via the hgweb command protocol."""

    def __init__(self, ui, path):
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        # hosts that must never go through a proxy
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        # NOTE(review): install_opener mutates global urllib2 state,
        # affecting every urlopen in the process
        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # network repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Issue *cmd* with query args; return the HTTP response object.

        Raises RepoError if the response does not look like it came
        from an hg server, or uses a newer protocol than we speak.
        """
        self.ui.debug("sending %s command\n" % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = urllib.urlencode(q)
        cu = "%s?%s" % (self.url, qs)
        resp = urllib2.urlopen(cu)
        proto = resp.headers['content-type']

        # accept old "text/plain" and "application/hg-changegroup" for now
        if not proto.startswith('application/mercurial') and \
           not proto.startswith('text/plain') and \
           not proto.startswith('application/hg-changegroup'):
            raise RepoError("'%s' does not appear to be an hg repository"
                            % self.url)

        if proto.startswith('application/mercurial'):
            version = proto[22:]
            if float(version) > 0.1:
                raise RepoError("'%s' uses newer protocol %s" %
                                (self.url, version))

        return resp

    def heads(self):
        """Return the list of remote head nodes."""
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        """Return (head, root, parent1, parent2) tuples for each node."""
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        """Sample nodes between each (top, bottom) pair, for bisection."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """Request a changegroup rooted at nodes; returns a file-like
        object that transparently inflates the compressed stream."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        bytes = 0

        class zread:
            # wraps the HTTP response in an incremental zlib decompressor
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                # inflate 4k network chunks until l bytes are buffered
                while l > len(self.buf):
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
2150
class remotelock:
    """Handle for a lock held on a remote repository.

    Guarantees the remote lock is dropped at the latest when this
    object is garbage-collected.
    """

    def __init__(self, repo):
        self.repo = repo

    def release(self):
        """Unlock the remote side and drop our reference to it."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # safety net: release exactly once if the caller forgot to
        if self.repo:
            self.release()
2160
class sshrepository(remoterepository):
    """A repository reached by running 'hg serve --stdio' over ssh."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7) or "."

        # assemble "user@host [-p port]" for the ssh command line
        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, self.path)

        # pipeo: our stdout -> remote stdin; pipei: remote stdout;
        # pipee: remote stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        # drain anything the remote wrote to stderr, without blocking
        while 1:
            r,w,x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # close the pipes, forwarding any remaining remote stderr output
        try:
            self.pipeo.close()
            self.pipei.close()
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        # network repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Send *cmd* plus length-prefixed args; return the input pipe."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run *cmd* and return its length-prefixed response payload."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the list of remote head nodes."""
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Return (head, root, parent1, parent2) tuples for each node."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """Sample nodes between each (top, bottom) pair, for bisection."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Stream a changegroup rooted at nodes from the remote."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Push the changegroup *cg* to the remote; raises on refusal."""
        d = self.call("addchangegroup")
        if d:
            raise RepoError("push refused: %s", d)

        # stream the changegroup in 4k chunks, draining stderr as we go
        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2279
class httpsrepository(httprepository):
    """HTTPS variant of httprepository; urllib2 handles the TLS layer."""
    pass
2282
def repository(ui, path=None, create=0):
    """Return a repository object appropriate for *path*.

    URL schemes select a network proxy class; anything else (including
    no path at all) is opened as a local repository.  create is only
    meaningful for local repositories.
    """
    if not path:
        return localrepository(ui, path, create)

    if path.startswith("http://"):
        return httprepository(ui, path)
    if path.startswith("https://"):
        return httpsrepository(ui, path)
    if path.startswith("hg://"):
        # legacy scheme, served over plain http
        return httprepository(ui, path.replace("hg://", "http://"))
    if path.startswith("old-http://"):
        # statically-served repository: read it via localrepository
        return localrepository(ui, path.replace("old-http://", "http://"))
    if path.startswith("ssh://"):
        return sshrepository(ui, path)

    return localrepository(ui, path, create)
1 NO CONTENT: file copied from mercurial/hg.py to mercurial/remoterepo.py
NO CONTENT: file copied from mercurial/hg.py to mercurial/remoterepo.py
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file copied from mercurial/hg.py to mercurial/sshrepo.py
NO CONTENT: file copied from mercurial/hg.py to mercurial/sshrepo.py
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now