cleanup of revlog.group when repository is local...
Benoit Boissinot - r1677:11d12bd6 default
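
The hunks below thread a `local` flag from localrepository into the changelog, filelog, and manifest constructors and on into revlog.__init__. The revlog.py and manifest.py hunks are not part of this excerpt, so the following is only a minimal sketch of the pattern, under the assumption that revlog.__init__ simply grows a `local` keyword and stores it; per the commit message, revlog.group() can then behave differently when the repository is local.

# Hypothetical sketch, not the revlog.py hunk itself: the base class stores
# the flag, and each subclass forwards it instead of relying on the default.
class revlog(object):
    def __init__(self, opener, indexfile, datafile, local=True):
        self.opener = opener
        self.indexfile = indexfile
        self.datafile = datafile
        self.local = local      # assumed: consulted later, e.g. by group()

class changelog(revlog):
    def __init__(self, opener, local=True):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d",
                        local=local)

cl = changelog(opener=None, local=False)    # e.g. a non-local repository
assert cl.local is False
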
--- a/changelog.py
+++ b/changelog.py
@@ -1,57 +1,58 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from revlog import *
 from i18n import gettext as _
 from demandload import demandload
 demandload(globals(), "os time util")
 
 class changelog(revlog):
-    def __init__(self, opener):
-        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
+    def __init__(self, opener, local=True):
+        revlog.__init__(self, opener, "00changelog.i", "00changelog.d",
+                        local=local)
 
     def extract(self, text):
         if not text:
             return (nullid, "", (0, 0), [], "")
         last = text.index("\n\n")
         desc = text[last + 2:]
         l = text[:last].splitlines()
         manifest = bin(l[0])
         user = l[1]
         date = l[2].split(' ')
         time = float(date.pop(0))
         try:
             # various tools did silly things with the time zone field.
             timezone = int(date[0])
         except:
             timezone = 0
         files = l[3:]
         return (manifest, user, (time, timezone), files, desc)
 
     def read(self, node):
         return self.extract(self.revision(node))
 
     def add(self, manifest, list, desc, transaction, p1=None, p2=None,
             user=None, date=None):
         if date:
             # validate explicit (probably user-specified) date and
             # time zone offset. values must fit in signed 32 bits for
             # current 32-bit linux runtimes.
             try:
                 when, offset = map(int, date.split(' '))
             except ValueError:
                 raise ValueError(_('invalid date: %r') % date)
             if abs(when) > 0x7fffffff:
                 raise ValueError(_('date exceeds 32 bits: %d') % when)
             if abs(offset) >= 43200:
                 raise ValueError(_('impossible time zone offset: %d') % offset)
         else:
             date = "%d %d" % util.makedate()
         list.sort()
         l = [hex(manifest), user, date] + list + ["", desc]
         text = "\n".join(l)
         return self.addrevision(text, transaction, self.count(), p1, p2)
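
For reference, extract() and add() above are inverses over a very simple text format: the manifest hash in hex, the committer, "time offset", the sorted file list, a blank line, and the description. A standalone toy round-trip of that format (hex/bin node helpers are replaced by plain strings, and the hash value is made up):

def pack(manifest_hex, user, date, files, desc):
    # mirrors add(): hex manifest, user, date, sorted files, blank, desc
    return "\n".join([manifest_hex, user, date] + sorted(files) + ["", desc])

def unpack(text):
    # mirrors extract(): split the header from the description at the
    # first blank line, then pick the fields apart
    last = text.index("\n\n")
    desc = text[last + 2:]
    l = text[:last].splitlines()
    when, offset = l[2].split(' ', 1)
    return (l[0], l[1], (float(when), int(offset)), l[3:], desc)

entry = pack("ab" * 20, "user@example.com", "1136500000 -3600",
             ["b.txt", "a.txt"], "an example description\n")
assert unpack(entry)[3] == ["a.txt", "b.txt"]
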
--- a/filelog.py
+++ b/filelog.py
@@ -1,107 +1,108 @@
 # filelog.py - file history class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import os
 from revlog import *
 from demandload import *
 demandload(globals(), "bdiff")
 
 class filelog(revlog):
-    def __init__(self, opener, path):
+    def __init__(self, opener, path, local=True):
         revlog.__init__(self, opener,
                         os.path.join("data", self.encodedir(path + ".i")),
-                        os.path.join("data", self.encodedir(path + ".d")))
+                        os.path.join("data", self.encodedir(path + ".d")),
+                        local=local)
 
     # This avoids a collision between a file named foo and a dir named
     # foo.i or foo.d
     def encodedir(self, path):
         return (path
                 .replace(".hg/", ".hg.hg/")
                 .replace(".i/", ".i.hg/")
                 .replace(".d/", ".d.hg/"))
 
     def decodedir(self, path):
         return (path
                 .replace(".d.hg/", ".d/")
                 .replace(".i.hg/", ".i/")
                 .replace(".hg.hg/", ".hg/"))
 
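The encodedir/decodedir pair just above exists because a tracked directory named foo.i, foo.d, or .hg would otherwise collide with the store files data/foo.i and data/foo.d that the constructor builds. The two functions are pure string rewrites, so their round trip is easy to check standalone:

def encodedir(path):
    return (path
            .replace(".hg/", ".hg.hg/")
            .replace(".i/", ".i.hg/")
            .replace(".d/", ".d.hg/"))

def decodedir(path):
    return (path
            .replace(".d.hg/", ".d/")
            .replace(".i.hg/", ".i/")
            .replace(".hg.hg/", ".hg/"))

for p in ["foo.i/bar", "baz.d/qux", "sub/.hg/f", "plain.txt"]:
    assert decodedir(encodedir(p)) == p
print(encodedir("foo.i/bar"))   # foo.i.hg/bar
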
     def read(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.find('\1\n', 2)
         return t[s+2:]
 
     def readmeta(self, node):
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return {}
         s = t.find('\1\n', 2)
         mt = t[2:s]
         m = {}
         for l in mt.splitlines():
             k, v = l.split(": ", 1)
             m[k] = v
         return m
 
     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
             mt = ""
             if meta:
                 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
             text = "\1\n%s\1\n%s" % ("".join(mt), text)
         return self.addrevision(text, transaction, link, p1, p2)
 
     def renamed(self, node):
         if 0 and self.parents(node)[0] != nullid: # XXX
             return False
         m = self.readmeta(node)
         if m and m.has_key("copy"):
             return (m["copy"], bin(m["copyrev"]))
         return False
 
     def annotate(self, node):
 
         def decorate(text, rev):
             return ([rev] * len(text.splitlines()), text)
 
         def pair(parent, child):
             for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                 child[0][b1:b2] = parent[0][a1:a2]
             return child
 
         # find all ancestors
         needed = {node:1}
         visit = [node]
         while visit:
             n = visit.pop(0)
             for p in self.parents(n):
                 if p not in needed:
                     needed[p] = 1
                     visit.append(p)
                 else:
                     # count how many times we'll use this
                     needed[p] += 1
 
         # sort by revision which is a topological order
         visit = [ (self.rev(n), n) for n in needed.keys() ]
         visit.sort()
         hist = {}
 
         for r,n in visit:
             curr = decorate(self.read(n), self.linkrev(n))
             for p in self.parents(n):
                 if p != nullid:
                     curr = pair(hist[p], curr)
                     # trim the history of unneeded revs
                     needed[p] -= 1
                     if not needed[p]:
                         del hist[p]
             hist[n] = curr
 
         return zip(hist[n][0], hist[n][1].splitlines(1))
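
annotate() above tags every line of a revision with the revision that introduced it: decorate() pairs a text's lines with its linkrev, and pair() copies annotations from parent to child for each block that bdiff.blocks reports as unchanged, walking ancestors in topological order and refcounting with `needed` so finished parents can be dropped. A toy version of the decorate/pair step, with the standard library's difflib standing in for the bdiff extension:

import difflib

def decorate(text, rev):
    return ([rev] * len(text.splitlines()), text)

def pair(parent, child):
    sm = difflib.SequenceMatcher(None, parent[1].splitlines(),
                                 child[1].splitlines())
    # (a1, a2, b1, b2) matching blocks, like bdiff.blocks()
    for a1, a2, b1, b2 in ((m.a, m.a + m.size, m.b, m.b + m.size)
                           for m in sm.get_matching_blocks()):
        child[0][b1:b2] = parent[0][a1:a2]
    return child

v1 = decorate("a\nb\nc\n", rev=0)
v2 = pair(v1, decorate("a\nx\nc\n", rev=1))
print(list(zip(v2[0], v2[1].splitlines(True))))
# [(0, 'a\n'), (1, 'x\n'), (0, 'c\n')]
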
--- a/localrepo.py
+++ b/localrepo.py
@@ -1,1786 +1,1786 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import struct, os, util
 import filelog, manifest, changelog, dirstate, repo
 from node import *
 from i18n import gettext as _
 from demandload import *
 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
 
 class localrepository(object):
     def __init__(self, ui, path=None, create=0):
         if not path:
             p = os.getcwd()
             while not os.path.isdir(os.path.join(p, ".hg")):
                 oldp = p
                 p = os.path.dirname(p)
                 if p == oldp: raise repo.RepoError(_("no repo found"))
             path = p
         self.path = os.path.join(path, ".hg")
 
         if not create and not os.path.isdir(self.path):
             raise repo.RepoError(_("repository %s not found") % self.path)
 
         self.root = os.path.abspath(path)
         self.ui = ui
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
-        self.manifest = manifest.manifest(self.opener)
-        self.changelog = changelog.changelog(self.opener)
+        self.manifest = manifest.manifest(self.opener, local=self.local())
+        self.changelog = changelog.changelog(self.opener, local=self.local())
         self.tagscache = None
         self.nodetagscache = None
         self.encodepats = None
         self.decodepats = None
 
         if create:
             os.mkdir(self.path)
             os.mkdir(self.join("data"))
 
         self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
         try:
             self.ui.readconfig(self.join("hgrc"))
         except IOError: pass
 
     def hook(self, name, **args):
         def runhook(name, cmd):
             self.ui.note(_("running hook %s: %s\n") % (name, cmd))
             old = {}
             for k, v in args.items():
                 k = k.upper()
                 old[k] = os.environ.get(k, None)
                 os.environ[k] = v
 
             # Hooks run in the repository root
             olddir = os.getcwd()
             os.chdir(self.root)
             r = os.system(cmd)
             os.chdir(olddir)
 
             for k, v in old.items():
                 if v != None:
                     os.environ[k] = v
                 else:
                     del os.environ[k]
 
             if r:
                 self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                              (name, r))
                 return False
             return True
 
         r = True
         for hname, cmd in self.ui.configitems("hooks"):
             s = hname.split(".")
             if s[0] == name and cmd:
                 r = runhook(hname, cmd) and r
         return r
 
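runhook() above exports each hook argument as an uppercased environment variable, runs the command with os.system() from the repository root, and restores the previous environment afterwards. The save/set/restore dance in isolation (the command and variable names here are illustrative):

import os

def run_with_env(cmd, **args):
    old = {}
    for k, v in args.items():
        k = k.upper()
        old[k] = os.environ.get(k)     # None means "was unset"
        os.environ[k] = v
    try:
        return os.system(cmd)          # non-zero status makes hook() warn
    finally:
        for k, v in old.items():
            if v is not None:
                os.environ[k] = v
            else:
                del os.environ[k]

run_with_env('echo "node is $NODE"', node="11d12bd6")
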
     def tags(self):
         '''return a mapping of tag to node'''
         if not self.tagscache:
             self.tagscache = {}
             def addtag(self, k, n):
                 try:
                     bin_n = bin(n)
                 except TypeError:
                     bin_n = ''
                 self.tagscache[k.strip()] = bin_n
 
             try:
                 # read each head of the tags file, ending with the tip
                 # and add each tag found to the map, with "newer" ones
                 # taking precedence
                 fl = self.file(".hgtags")
                 h = fl.heads()
                 h.reverse()
                 for r in h:
                     for l in fl.read(r).splitlines():
                         if l:
                             n, k = l.split(" ", 1)
                             addtag(self, k, n)
             except KeyError:
                 pass
 
             try:
                 f = self.opener("localtags")
                 for l in f:
                     n, k = l.split(" ", 1)
                     addtag(self, k, n)
             except IOError:
                 pass
 
             self.tagscache['tip'] = self.changelog.tip()
 
         return self.tagscache
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r,t,n))
         l.sort()
         return [(t,n) for r,t,n in l]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t,n in self.tags().items():
                 self.nodetagscache.setdefault(n,[]).append(t)
         return self.nodetagscache.get(node, [])
 
     def lookup(self, key):
         try:
             return self.tags()[key]
         except KeyError:
             try:
                 return self.changelog.lookup(key)
             except:
                 raise repo.RepoError(_("unknown revision '%s'") % key)
 
     def dev(self):
         return os.stat(self.path).st_dev
 
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/': f = f[1:]
-        return filelog.filelog(self.opener, f)
+        return filelog.filelog(self.opener, f, local=self.local())
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def wread(self, filename):
         if self.encodepats == None:
             l = []
             for pat, cmd in self.ui.configitems("encode"):
                 mf = util.matcher("", "/", [pat], [], [])[1]
                 l.append((mf, cmd))
             self.encodepats = l
 
         data = self.wopener(filename, 'r').read()
 
         for mf, cmd in self.encodepats:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = util.filter(data, cmd)
                 break
 
         return data
 
     def wwrite(self, filename, data, fd=None):
         if self.decodepats == None:
             l = []
             for pat, cmd in self.ui.configitems("decode"):
                 mf = util.matcher("", "/", [pat], [], [])[1]
                 l.append((mf, cmd))
             self.decodepats = l
 
         for mf, cmd in self.decodepats:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = util.filter(data, cmd)
                 break
 
         if fd:
             return fd.write(data)
         return self.wopener(filename, 'w').write(data)
 
     def transaction(self):
         # save dirstate for undo
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
 
         def after():
             util.rename(self.join("journal"), self.join("undo"))
             util.rename(self.join("journal.dirstate"),
                         self.join("undo.dirstate"))
 
         return transaction.transaction(self.ui.warn, self.opener,
                                        self.join("journal"), after)
 
     def recover(self):
         lock = self.lock()
         if os.path.exists(self.join("journal")):
             self.ui.status(_("rolling back interrupted transaction\n"))
             transaction.rollback(self.opener, self.join("journal"))
             return True
         else:
             self.ui.warn(_("no interrupted transaction available\n"))
             return False
 
     def undo(self):
         wlock = self.wlock()
         lock = self.lock()
         if os.path.exists(self.join("undo")):
             self.ui.status(_("rolling back last transaction\n"))
             transaction.rollback(self.opener, self.join("undo"))
             util.rename(self.join("undo.dirstate"), self.join("dirstate"))
             self.dirstate.read()
         else:
             self.ui.warn(_("no undo information available\n"))
 
     def lock(self, wait=1):
         try:
             return lock.lock(self.join("lock"), 0)
         except lock.LockHeld, inst:
             if wait:
                 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                 return lock.lock(self.join("lock"), wait)
             raise inst
 
     def wlock(self, wait=1):
         try:
             wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
         except lock.LockHeld, inst:
             if not wait:
                 raise inst
             self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
             wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
         self.dirstate.read()
         return wlock
 
     def rawcommit(self, files, text, user, date, p1=None, p2=None):
         orig_parent = self.dirstate.parents()[0] or nullid
         p1 = p1 or self.dirstate.parents()[0] or nullid
         p2 = p2 or self.dirstate.parents()[1] or nullid
         c1 = self.changelog.read(p1)
         c2 = self.changelog.read(p2)
         m1 = self.manifest.read(c1[0])
         mf1 = self.manifest.readflags(c1[0])
         m2 = self.manifest.read(c2[0])
         changed = []
 
         if orig_parent == p1:
             update_dirstate = 1
         else:
             update_dirstate = 0
 
         wlock = self.wlock()
         lock = self.lock()
         tr = self.transaction()
         mm = m1.copy()
         mfm = mf1.copy()
         linkrev = self.changelog.count()
         for f in files:
             try:
                 t = self.wread(f)
                 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                 r = self.file(f)
                 mfm[f] = tm
 
                 fp1 = m1.get(f, nullid)
                 fp2 = m2.get(f, nullid)
 
                 # is the same revision on two branches of a merge?
                 if fp2 == fp1:
                     fp2 = nullid
 
                 if fp2 != nullid:
                     # is one parent an ancestor of the other?
                     fpa = r.ancestor(fp1, fp2)
                     if fpa == fp1:
                         fp1, fp2 = fp2, nullid
                     elif fpa == fp2:
                         fp2 = nullid
 
                 # is the file unmodified from the parent?
                 if t == r.read(fp1):
                     # record the proper existing parent in manifest
                     # no need to add a revision
                     mm[f] = fp1
                     continue
 
                 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                 changed.append(f)
                 if update_dirstate:
                     self.dirstate.update([f], "n")
             except IOError:
                 try:
                     del mm[f]
                     del mfm[f]
                     if update_dirstate:
                         self.dirstate.forget([f])
                 except:
                     # deleted from p2?
                     pass
 
         mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
         user = user or self.ui.username()
         n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
         tr.close()
         if update_dirstate:
             self.dirstate.setparents(n, nullid)
 
     def commit(self, files = None, text = "", user = None, date = None,
                match = util.always, force=False):
         commit = []
         remove = []
         changed = []
 
         if files:
             for f in files:
                 s = self.dirstate.state(f)
                 if s in 'nmai':
                     commit.append(f)
                 elif s == 'r':
                     remove.append(f)
                 else:
                     self.ui.warn(_("%s not tracked!\n") % f)
         else:
             (c, a, d, u) = self.changes(match=match)
             commit = c + a
             remove = d
 
         p1, p2 = self.dirstate.parents()
         c1 = self.changelog.read(p1)
         c2 = self.changelog.read(p2)
         m1 = self.manifest.read(c1[0])
         mf1 = self.manifest.readflags(c1[0])
         m2 = self.manifest.read(c2[0])
 
         if not commit and not remove and not force and p2 == nullid:
             self.ui.status(_("nothing changed\n"))
             return None
 
         if not self.hook("precommit"):
             return None
 
         wlock = self.wlock()
         lock = self.lock()
         tr = self.transaction()
 
         # check in files
         new = {}
         linkrev = self.changelog.count()
         commit.sort()
         for f in commit:
             self.ui.note(f + "\n")
             try:
                 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                 t = self.wread(f)
             except IOError:
                 self.ui.warn(_("trouble committing %s!\n") % f)
                 raise
 
             r = self.file(f)
 
             meta = {}
             cp = self.dirstate.copied(f)
             if cp:
                 meta["copy"] = cp
                 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                 fp1, fp2 = nullid, nullid
             else:
                 fp1 = m1.get(f, nullid)
                 fp2 = m2.get(f, nullid)
 
                 # is the same revision on two branches of a merge?
                 if fp2 == fp1:
                     fp2 = nullid
 
                 if fp2 != nullid:
                     # is one parent an ancestor of the other?
                     fpa = r.ancestor(fp1, fp2)
                     if fpa == fp1:
                         fp1, fp2 = fp2, nullid
                     elif fpa == fp2:
                         fp2 = nullid
 
                 # is the file unmodified from the parent?
                 if not meta and t == r.read(fp1):
                     # record the proper existing parent in manifest
                     # no need to add a revision
                     new[f] = fp1
                     continue
 
             new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
             # remember what we've added so that we can later calculate
             # the files to pull from a set of changesets
             changed.append(f)
 
         # update manifest
         m1.update(new)
         for f in remove:
             if f in m1:
                 del m1[f]
         mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                                (new, remove))
 
         # add changeset
         new = new.keys()
         new.sort()
 
         if not text:
             edittext = ""
             if p2 != nullid:
                 edittext += "HG: branch merge\n"
             edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
             edittext += "".join(["HG: changed %s\n" % f for f in changed])
             edittext += "".join(["HG: removed %s\n" % f for f in remove])
             if not changed and not remove:
                 edittext += "HG: no files changed\n"
             edittext = self.ui.edit(edittext)
             if not edittext.rstrip():
                 return None
             text = edittext
 
         user = user or self.ui.username()
         n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
         tr.close()
 
         self.dirstate.setparents(n)
         self.dirstate.update(new, "n")
         self.dirstate.forget(remove)
 
         if not self.hook("commit", node=hex(n)):
             return None
         return n
 
     def walk(self, node=None, files=[], match=util.always):
         if node:
             fdict = dict.fromkeys(files)
             for fn in self.manifest.read(self.changelog.read(node)[0]):
                 fdict.pop(fn, None)
                 if match(fn):
                     yield 'm', fn
             for fn in fdict:
                 self.ui.warn(_('%s: No such file in rev %s\n') % (
                     util.pathto(self.getcwd(), fn), short(node)))
         else:
             for src, fn in self.dirstate.walk(files, match):
                 yield src, fn
 
     def changes(self, node1 = None, node2 = None, files = [],
                 match = util.always):
         mf2, u = None, []
 
         def fcmp(fn, mf):
             t1 = self.wread(fn)
             t2 = self.file(fn).read(mf.get(fn, nullid))
             return cmp(t1, t2)
 
         def mfmatches(node):
             mf = dict(self.manifest.read(node))
             for fn in mf.keys():
                 if not match(fn):
                     del mf[fn]
             return mf
 
         # are we comparing the working directory?
         if not node2:
             try:
                 wlock = self.wlock(wait=0)
             except lock.LockHeld:
                 wlock = None
             l, c, a, d, u = self.dirstate.changes(files, match)
 
             # are we comparing working dir against its parent?
             if not node1:
                 if l:
                     # do a full compare of any files that might have changed
                     change = self.changelog.read(self.dirstate.parents()[0])
                     mf2 = mfmatches(change[0])
                     for f in l:
                         if fcmp(f, mf2):
                             c.append(f)
                         elif wlock is not None:
                             self.dirstate.update([f], "n")
 
                 for l in c, a, d, u:
                     l.sort()
 
                 return (c, a, d, u)
 
         # are we comparing working dir against non-tip?
         # generate a pseudo-manifest for the working dir
         if not node2:
             if not mf2:
                 change = self.changelog.read(self.dirstate.parents()[0])
                 mf2 = mfmatches(change[0])
             for f in a + c + l:
                 mf2[f] = ""
             for f in d:
                 if f in mf2: del mf2[f]
         else:
             change = self.changelog.read(node2)
             mf2 = mfmatches(change[0])
 
         # flush lists from dirstate before comparing manifests
         c, a = [], []
 
         change = self.changelog.read(node1)
         mf1 = mfmatches(change[0])
 
         for fn in mf2:
             if mf1.has_key(fn):
                 if mf1[fn] != mf2[fn]:
                     if mf2[fn] != "" or fcmp(fn, mf1):
                         c.append(fn)
                 del mf1[fn]
             else:
                 a.append(fn)
 
         d = mf1.keys()
 
         for l in c, a, d, u:
             l.sort()
 
         return (c, a, d, u)
 
     def add(self, list):
         wlock = self.wlock()
         for f in list:
             p = self.wjoin(f)
             if not os.path.exists(p):
                 self.ui.warn(_("%s does not exist!\n") % f)
             elif not os.path.isfile(p):
                 self.ui.warn(_("%s not added: only files supported currently\n") % f)
             elif self.dirstate.state(f) in 'an':
                 self.ui.warn(_("%s already tracked!\n") % f)
             else:
                 self.dirstate.update([f], "a")
 
     def forget(self, list):
         wlock = self.wlock()
         for f in list:
             if self.dirstate.state(f) not in 'ai':
                 self.ui.warn(_("%s not added!\n") % f)
             else:
                 self.dirstate.forget([f])
 
     def remove(self, list, unlink=False):
         if unlink:
             for f in list:
                 try:
                     util.unlink(self.wjoin(f))
                 except OSError, inst:
                     if inst.errno != errno.ENOENT: raise
         wlock = self.wlock()
         for f in list:
             p = self.wjoin(f)
             if os.path.exists(p):
                 self.ui.warn(_("%s still exists!\n") % f)
             elif self.dirstate.state(f) == 'a':
                 self.ui.warn(_("%s never committed!\n") % f)
                 self.dirstate.forget([f])
             elif f not in self.dirstate:
                 self.ui.warn(_("%s not tracked!\n") % f)
             else:
                 self.dirstate.update([f], "r")
 
     def undelete(self, list):
         p = self.dirstate.parents()[0]
         mn = self.changelog.read(p)[0]
         mf = self.manifest.readflags(mn)
         m = self.manifest.read(mn)
         wlock = self.wlock()
         for f in list:
             if self.dirstate.state(f) not in "r":
                 self.ui.warn("%s not removed!\n" % f)
             else:
                 t = self.file(f).read(m[f])
                 self.wwrite(f, t)
                 util.set_exec(self.wjoin(f), mf[f])
                 self.dirstate.update([f], "n")
 
     def copy(self, source, dest):
         p = self.wjoin(dest)
         if not os.path.exists(p):
             self.ui.warn(_("%s does not exist!\n") % dest)
         elif not os.path.isfile(p):
             self.ui.warn(_("copy failed: %s is not a file\n") % dest)
         else:
             wlock = self.wlock()
             if self.dirstate.state(dest) == '?':
                 self.dirstate.update([dest], "a")
             self.dirstate.copy(source, dest)
 
     def heads(self, start=None):
         heads = self.changelog.heads(start)
         # sort the output in rev descending order
         heads = [(-self.changelog.rev(h), h) for h in heads]
         heads.sort()
         return [n for (r, n) in heads]
 
     # branchlookup returns a dict giving a list of branches for
     # each head. A branch is defined as the tag of a node or
     # the branch of the node's parents. If a node has multiple
     # branch tags, tags are eliminated if they are visible from other
     # branch tags.
     #
     # So, for this graph:  a->b->c->d->e
     #                       \         /
     #                        aa -----/
     # a has tag 2.6.12
     # d has tag 2.6.13
     # e would have branch tags for 2.6.12 and 2.6.13. Because the node
     # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
     # from the list.
     #
     # It is possible that more than one head will have the same branch tag.
     # callers need to check the result for multiple heads under the same
     # branch tag if that is a problem for them (ie checkout of a specific
     # branch).
     #
     # passing in a specific branch will limit the depth of the search
     # through the parents. It won't limit the branches returned in the
     # result though.
     def branchlookup(self, heads=None, branch=None):
         if not heads:
             heads = self.heads()
         headt = [ h for h in heads ]
         chlog = self.changelog
         branches = {}
         merges = []
         seenmerge = {}
 
         # traverse the tree once for each head, recording in the branches
         # dict which tags are visible from this head. The branches
         # dict also records which tags are visible from each tag
         # while we traverse.
         while headt or merges:
             if merges:
                 n, found = merges.pop()
                 visit = [n]
             else:
                 h = headt.pop()
                 visit = [h]
                 found = [h]
                 seen = {}
             while visit:
                 n = visit.pop()
                 if n in seen:
                     continue
                 pp = chlog.parents(n)
                 tags = self.nodetags(n)
                 if tags:
                     for x in tags:
                         if x == 'tip':
                             continue
                         for f in found:
                             branches.setdefault(f, {})[n] = 1
                         branches.setdefault(n, {})[n] = 1
                         break
                     if n not in found:
                         found.append(n)
                     if branch in tags:
                         continue
                 seen[n] = 1
                 if pp[1] != nullid and n not in seenmerge:
                     merges.append((pp[1], [x for x in found]))
                     seenmerge[n] = 1
                 if pp[0] != nullid:
                     visit.append(pp[0])
         # traverse the branches dict, eliminating branch tags from each
         # head that are visible from another branch tag for that head.
         out = {}
         viscache = {}
         for h in heads:
             def visible(node):
                 if node in viscache:
                     return viscache[node]
                 ret = {}
                 visit = [node]
                 while visit:
                     x = visit.pop()
                     if x in viscache:
                         ret.update(viscache[x])
                     elif x not in ret:
                         ret[x] = 1
                         if x in branches:
                             visit[len(visit):] = branches[x].keys()
                 viscache[node] = ret
                 return ret
             if h not in branches:
                 continue
             # O(n^2), but somewhat limited. This only searches the
             # tags visible from a specific head, not all the tags in the
             # whole repo.
             for b in branches[h]:
                 vis = False
                 for bb in branches[h].keys():
                     if b != bb:
                         if b in visible(bb):
                             vis = True
                             break
                 if not vis:
                     l = out.setdefault(h, [])
                     l[len(l):] = self.nodetags(b)
         return out
 
     def branches(self, nodes):
         if not nodes: nodes = [self.changelog.tip()]
         b = []
         for n in nodes:
             t = n
             while n:
                 p = self.changelog.parents(n)
                 if p[1] != nullid or p[0] == nullid:
                     b.append((t, n, p[0], p[1]))
                     break
                 n = p[0]
         return b
 
     def between(self, pairs):
         r = []
 
         for top, bottom in pairs:
             n, l, i = top, [], 0
             f = 1
 
             while n != bottom:
                 p = self.changelog.parents(n)[0]
                 if i == f:
                     l.append(n)
                     f = f * 2
                 n = p
                 i += 1
 
             r.append(l)
 
         return r
 
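between() above walks first parents from "top" toward "bottom" and records the nodes at exponentially growing distances (1, 2, 4, 8, ...), which is what lets the discovery code below binary-search a branch with few round trips. The same loop over a linear chain of integers, as a standalone toy, shows which positions get sampled:

def sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n -= 1          # stand-in for "first parent of n"
        i += 1
    return l

print(sample(100, 0))   # [99, 98, 96, 92, 84, 68, 36]
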
767 def findincoming(self, remote, base=None, heads=None):
767 def findincoming(self, remote, base=None, heads=None):
768 m = self.changelog.nodemap
768 m = self.changelog.nodemap
769 search = []
769 search = []
770 fetch = {}
770 fetch = {}
771 seen = {}
771 seen = {}
772 seenbranch = {}
772 seenbranch = {}
773 if base == None:
773 if base == None:
774 base = {}
774 base = {}
775
775
776 # assume we're closer to the tip than the root
776 # assume we're closer to the tip than the root
777 # and start by examining the heads
777 # and start by examining the heads
778 self.ui.status(_("searching for changes\n"))
778 self.ui.status(_("searching for changes\n"))
779
779
780 if not heads:
780 if not heads:
781 heads = remote.heads()
781 heads = remote.heads()
782
782
783 unknown = []
783 unknown = []
784 for h in heads:
784 for h in heads:
785 if h not in m:
785 if h not in m:
786 unknown.append(h)
786 unknown.append(h)
787 else:
787 else:
788 base[h] = 1
788 base[h] = 1
789
789
790 if not unknown:
790 if not unknown:
791 return None
791 return None
792
792
793 rep = {}
793 rep = {}
794 reqcnt = 0
794 reqcnt = 0
795
795
796 # search through remote branches
796 # search through remote branches
797 # a 'branch' here is a linear segment of history, with four parts:
797 # a 'branch' here is a linear segment of history, with four parts:
798 # head, root, first parent, second parent
798 # head, root, first parent, second parent
799 # (a branch always has two parents (or none) by definition)
799 # (a branch always has two parents (or none) by definition)
800 unknown = remote.branches(unknown)
800 unknown = remote.branches(unknown)
801 while unknown:
801 while unknown:
802 r = []
802 r = []
803 while unknown:
803 while unknown:
804 n = unknown.pop(0)
804 n = unknown.pop(0)
805 if n[0] in seen:
805 if n[0] in seen:
806 continue
806 continue
807
807
808 self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
808 self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
809 if n[0] == nullid:
809 if n[0] == nullid:
810 break
810 break
811 if n in seenbranch:
811 if n in seenbranch:
812 self.ui.debug(_("branch already found\n"))
812 self.ui.debug(_("branch already found\n"))
813 continue
813 continue
814 if n[1] and n[1] in m: # do we know the base?
814 if n[1] and n[1] in m: # do we know the base?
815 self.ui.debug(_("found incomplete branch %s:%s\n")
815 self.ui.debug(_("found incomplete branch %s:%s\n")
816 % (short(n[0]), short(n[1])))
816 % (short(n[0]), short(n[1])))
817 search.append(n) # schedule branch range for scanning
817 search.append(n) # schedule branch range for scanning
818 seenbranch[n] = 1
818 seenbranch[n] = 1
819 else:
819 else:
820 if n[1] not in seen and n[1] not in fetch:
820 if n[1] not in seen and n[1] not in fetch:
821 if n[2] in m and n[3] in m:
821 if n[2] in m and n[3] in m:
822 self.ui.debug(_("found new changeset %s\n") %
822 self.ui.debug(_("found new changeset %s\n") %
823 short(n[1]))
823 short(n[1]))
824 fetch[n[1]] = 1 # earliest unknown
824 fetch[n[1]] = 1 # earliest unknown
825 base[n[2]] = 1 # latest known
825 base[n[2]] = 1 # latest known
826 continue
826 continue
827
827
828 for a in n[2:4]:
828 for a in n[2:4]:
829 if a not in rep:
829 if a not in rep:
830 r.append(a)
830 r.append(a)
831 rep[a] = 1
831 rep[a] = 1
832
832
833 seen[n[0]] = 1
833 seen[n[0]] = 1
834
834
835 if r:
835 if r:
836 reqcnt += 1
836 reqcnt += 1
837 self.ui.debug(_("request %d: %s\n") %
837 self.ui.debug(_("request %d: %s\n") %
838 (reqcnt, " ".join(map(short, r))))
838 (reqcnt, " ".join(map(short, r))))
839 for p in range(0, len(r), 10):
839 for p in range(0, len(r), 10):
840 for b in remote.branches(r[p:p+10]):
840 for b in remote.branches(r[p:p+10]):
841 self.ui.debug(_("received %s:%s\n") %
841 self.ui.debug(_("received %s:%s\n") %
842 (short(b[0]), short(b[1])))
842 (short(b[0]), short(b[1])))
843 if b[0] in m:
843 if b[0] in m:
844 self.ui.debug(_("found base node %s\n") % short(b[0]))
844 self.ui.debug(_("found base node %s\n") % short(b[0]))
845 base[b[0]] = 1
845 base[b[0]] = 1
846 elif b[0] not in seen:
846 elif b[0] not in seen:
847 unknown.append(b)
847 unknown.append(b)
848
848
849 # do binary search on the branches we found
849 # do binary search on the branches we found
850 while search:
850 while search:
851 n = search.pop(0)
851 n = search.pop(0)
852 reqcnt += 1
852 reqcnt += 1
853 l = remote.between([(n[0], n[1])])[0]
853 l = remote.between([(n[0], n[1])])[0]
854 l.append(n[1])
854 l.append(n[1])
855 p = n[0]
855 p = n[0]
856 f = 1
856 f = 1
857 for i in l:
857 for i in l:
858 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
858 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
859 if i in m:
859 if i in m:
860 if f <= 2:
860 if f <= 2:
861 self.ui.debug(_("found new branch changeset %s\n") %
861 self.ui.debug(_("found new branch changeset %s\n") %
862 short(p))
862 short(p))
863 fetch[p] = 1
863 fetch[p] = 1
864 base[i] = 1
864 base[i] = 1
865 else:
865 else:
866 self.ui.debug(_("narrowed branch search to %s:%s\n")
866 self.ui.debug(_("narrowed branch search to %s:%s\n")
867 % (short(p), short(i)))
867 % (short(p), short(i)))
868 search.append((p, i))
868 search.append((p, i))
869 break
869 break
870 p, f = i, f * 2
870 p, f = i, f * 2
871
871
872 # sanity check our fetch list
873 for f in fetch.keys():
874 if f in m:
875 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
876
877 if base.keys() == [nullid]:
878 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
879
880 self.ui.note(_("found new changesets starting at ") +
881 " ".join([short(f) for f in fetch]) + "\n")
882
883 self.ui.debug(_("%d total queries\n") % reqcnt)
884
885 return fetch.keys()
886
887 def findoutgoing(self, remote, base=None, heads=None):
888 if base == None:
889 base = {}
890 self.findincoming(remote, base, heads)
891
892 self.ui.debug(_("common changesets up to ")
893 + " ".join(map(short, base.keys())) + "\n")
894
895 remain = dict.fromkeys(self.changelog.nodemap)
896
897 # prune everything remote has from the tree
898 del remain[nullid]
899 remove = base.keys()
900 while remove:
901 n = remove.pop(0)
902 if n in remain:
903 del remain[n]
904 for p in self.changelog.parents(n):
905 remove.append(p)
906
907 # find every node whose parents have been pruned
908 subset = []
909 for n in remain:
910 p1, p2 = self.changelog.parents(n)
911 if p1 not in remain and p2 not in remain:
912 subset.append(n)
913
914 # this is the set of all roots we have to push
915 return subset
916
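The pruning above walks from the common bases back toward nullid, deleting everything the remote is known to have; whatever survives is outgoing, and the roots are the nodes both of whose parents were pruned. A toy model of that walk, assuming a hypothetical five-node graph with None standing in for nullid:

parents = {
    'a': (None, None),
    'b': ('a', None),
    'c': ('b', None),   # local work starts above 'b'
    'd': ('c', None),
    'e': ('c', None),   # two local heads on top of 'c'
}
remain = dict.fromkeys(parents)
remove = ['b']                       # base: latest changeset remote has
while remove:
    n = remove.pop(0)
    if n in remain:
        del remain[n]
        remove.extend(p for p in parents[n] if p is not None)

# roots: nodes whose parents were all pruned (or absent)
subset = [n for n in remain
          if all(p not in remain for p in parents[n])]
print(sorted(remain), subset)        # ['c', 'd', 'e'] ['c']
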
917 def pull(self, remote, heads = None):
918 lock = self.lock()
919
920 # if we have an empty repo, fetch everything
921 if self.changelog.tip() == nullid:
922 self.ui.status(_("requesting all changes\n"))
923 fetch = [nullid]
924 else:
925 fetch = self.findincoming(remote)
926
927 if not fetch:
928 self.ui.status(_("no changes found\n"))
929 return 1
930
931 if heads is None:
932 cg = remote.changegroup(fetch)
933 else:
934 cg = remote.changegroupsubset(fetch, heads)
935 return self.addchangegroup(cg)
936
937 def push(self, remote, force=False):
938 lock = remote.lock()
939
940 base = {}
941 heads = remote.heads()
942 inc = self.findincoming(remote, base, heads)
943 if not force and inc:
944 self.ui.warn(_("abort: unsynced remote changes!\n"))
945 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
946 return 1
947
948 update = self.findoutgoing(remote, base)
949 if not update:
950 self.ui.status(_("no changes found\n"))
951 return 1
952 elif not force:
953 if len(heads) < len(self.changelog.heads()):
954 self.ui.warn(_("abort: push creates new remote branches!\n"))
955 self.ui.status(_("(did you forget to merge?"
956 " use push -f to force)\n"))
957 return 1
958
959 cg = self.changegroup(update)
960 return remote.addchangegroup(cg)
961
962 def changegroupsubset(self, bases, heads):
963 """This function generates a changegroup consisting of all the nodes
964 that are descendants of any of the bases, and ancestors of any of
965 the heads.
966
967 It is fairly complex as determining which filenodes and which
968 manifest nodes need to be included for the changeset to be complete
969 is non-trivial.
970
971 Another wrinkle is doing the reverse, figuring out which changeset in
972 the changegroup a particular filenode or manifestnode belongs to."""
973
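nodesbetween, used heavily below, is informally descendants(bases) intersected with ancestors(heads), bases and heads included; it also hands back the minimal bases and heads, which the setup comments below rely on. A set-based model of the first return value (a sketch, not the real revlog-walking implementation):

def ancestors(parents, node):
    # all ancestors of node, node included
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n is not None and n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

parents = {'a': (None, None), 'b': ('a', None),
           'c': ('b', None), 'd': ('c', None), 'e': ('b', None)}

def nodesbetween(bases, heads):
    anc = set().union(*(ancestors(parents, h) for h in heads))
    # keep only nodes that have some base among their ancestors
    return sorted(n for n in anc
                  if ancestors(parents, n) & set(bases))

print(nodesbetween(['b'], ['d']))    # ['b', 'c', 'd']; 'e' is left out
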
974 # Set up some initial variables
975 # Make it easy to refer to self.changelog
976 cl = self.changelog
977 # msng is short for missing - compute the list of changesets in this
978 # changegroup.
979 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
980 # Some bases may turn out to be superfluous, and some heads may be
981 # too. nodesbetween will return the minimal set of bases and heads
982 # necessary to re-create the changegroup.
983
984 # Known heads are the list of heads that it is assumed the recipient
985 # of this changegroup will know about.
986 knownheads = {}
987 # We assume that all parents of bases are known heads.
988 for n in bases:
989 for p in cl.parents(n):
990 if p != nullid:
991 knownheads[p] = 1
992 knownheads = knownheads.keys()
993 if knownheads:
994 # Now that we know what heads are known, we can compute which
995 # changesets are known. The recipient must know about all
996 # changesets required to reach the known heads from the null
997 # changeset.
998 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
999 junk = None
1000 # Transform the list into an ersatz set.
1001 has_cl_set = dict.fromkeys(has_cl_set)
1002 else:
1003 # If there were no known heads, the recipient cannot be assumed to
1004 # know about any changesets.
1005 has_cl_set = {}
1006
1007 # Make it easy to refer to self.manifest
1008 mnfst = self.manifest
1009 # We don't know which manifests are missing yet
1010 msng_mnfst_set = {}
1011 # Nor do we know which filenodes are missing.
1012 msng_filenode_set = {}
1013
1014 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1015 junk = None
1016
1017 # A changeset always belongs to itself, so the changenode lookup
1018 # function for a changenode is identity.
1019 def identity(x):
1020 return x
1021
1022 # A function generating function. Sets up an environment for the
1023 # inner function.
1024 def cmp_by_rev_func(revlog):
1025 # Compare two nodes by their revision number in the environment's
1026 # revision history. Since the revision number both represents the
1027 # most efficient order to read the nodes in, and represents a
1028 # topological sorting of the nodes, this function is often useful.
1029 def cmp_by_rev(a, b):
1030 return cmp(revlog.rev(a), revlog.rev(b))
1031 return cmp_by_rev
1032
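cmp_by_rev_func above is a closure factory: it captures one revlog and returns a comparator for list.sort, so nodes get visited in storage (and hence topological) order. With a hypothetical stand-in revlog, the modern key= spelling of the same sort:

class fakelog:
    # hypothetical stand-in: maps nodes to revision numbers
    revs = {'n1': 2, 'n2': 0, 'n3': 1}
    def rev(self, node):
        return self.revs[node]

rl = fakelog()
nodes = ['n1', 'n2', 'n3']
nodes.sort(key=rl.rev)      # key= equivalent of sort(cmp_by_rev)
print(nodes)                # ['n2', 'n3', 'n1'], storage order
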
1033 # If we determine that a particular file or manifest node must be a
1034 # node that the recipient of the changegroup will already have, we can
1035 # also assume the recipient will have all the parents. This function
1036 # prunes them from the set of missing nodes.
1037 def prune_parents(revlog, hasset, msngset):
1038 haslst = hasset.keys()
1039 haslst.sort(cmp_by_rev_func(revlog))
1040 for node in haslst:
1041 parentlst = [p for p in revlog.parents(node) if p != nullid]
1042 while parentlst:
1043 n = parentlst.pop()
1044 if n not in hasset:
1045 hasset[n] = 1
1046 p = [p for p in revlog.parents(n) if p != nullid]
1047 parentlst.extend(p)
1048 for n in hasset:
1049 msngset.pop(n, None)
1050
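The effect of prune_parents on a toy chain (hypothetical one-parent nodes): once a single node is known to exist on the receiving side, its whole ancestor chain is implied and drops out of the missing set:

parents = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('c',)}
msngset = dict.fromkeys(parents)    # everything provisionally "missing"
hasset = {'c': 1}                   # ...but 'c' turned out to be present

stack = [p for n in list(hasset) for p in parents[n]]
while stack:
    n = stack.pop()
    if n not in hasset:
        hasset[n] = 1               # ancestors of a "has" node are "has"
        stack.extend(parents[n])
for n in hasset:
    msngset.pop(n, None)
print(sorted(msngset))              # ['d'], only the true gap remains
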
1051 # This is a function generating function used to set up an environment
1052 # for the inner function to execute in.
1053 def manifest_and_file_collector(changedfileset):
1054 # This is an information gathering function that gathers
1055 # information from each changeset node that goes out as part of
1056 # the changegroup. The information gathered is a list of which
1057 # manifest nodes are potentially required (the recipient may
1058 # already have them) and the total list of all files that were
1059 # changed in any changeset in the changegroup.
1060 #
1061 # We also remember the first changenode each manifest was
1062 # referenced by, so we can later determine which changenode 'owns'
1063 # the manifest.
1064 def collect_manifests_and_files(clnode):
1065 c = cl.read(clnode)
1066 for f in c[3]:
1067 # This is to make sure we only have one instance of each
1068 # filename string for each filename.
1069 changedfileset.setdefault(f, f)
1070 msng_mnfst_set.setdefault(c[0], clnode)
1071 return collect_manifests_and_files
1072
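cl.read returns the parsed changelog entry as a tuple; the manifest node at index 0 and the list of touched files at index 3 are all the collector above needs. A shape-only illustration with made-up values:

c = (b'\x8a...20-byte-node...',         # c[0] manifest node
     'user@example.com',                # c[1] committer
     (1135468800.0, 0),                 # c[2] (time, timezone)
     ['a/b.txt', 'c.txt'],              # c[3] files changed
     'commit message')                  # c[4] description

changedfileset, msng_mnfst_set = {}, {}
clnode = 'cl-node-1'                    # hypothetical changenode
for f in c[3]:
    changedfileset.setdefault(f, f)     # dedupe filename strings
msng_mnfst_set.setdefault(c[0], clnode) # first changeset wins ownership
print(sorted(changedfileset), list(msng_mnfst_set.values()))
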
1073 # Figure out which manifest nodes (of the ones we think might be part
1074 # of the changegroup) the recipient must know about and remove them
1075 # from the changegroup.
1076 def prune_manifests():
1077 has_mnfst_set = {}
1078 for n in msng_mnfst_set:
1079 # If a 'missing' manifest thinks it belongs to a changenode
1080 # the recipient is assumed to have, obviously the recipient
1081 # must have that manifest.
1082 linknode = cl.node(mnfst.linkrev(n))
1083 if linknode in has_cl_set:
1084 has_mnfst_set[n] = 1
1085 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1086
1087 # Use the information collected in collect_manifests_and_files to say
1088 # which changenode any manifestnode belongs to.
1089 def lookup_manifest_link(mnfstnode):
1090 return msng_mnfst_set[mnfstnode]
1091
1092 # A function generating function that sets up the initial environment
1093 # for the inner function.
1094 def filenode_collector(changedfiles):
1095 next_rev = [0]
1096 # This gathers information from each manifestnode included in the
1097 # changegroup about which filenodes the manifest node references
1098 # so we can include those in the changegroup too.
1099 #
1100 # It also remembers which changenode each filenode belongs to. It
1101 # does this by assuming a filenode belongs to the changenode that
1102 # the first manifest referencing it belongs to.
1103 def collect_msng_filenodes(mnfstnode):
1104 r = mnfst.rev(mnfstnode)
1105 if r == next_rev[0]:
1106 # If the last rev we looked at was the one just previous,
1107 # we only need to see a diff.
1108 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1109 # For each line in the delta
1110 for dline in delta.splitlines():
1111 # get the filename and filenode for that line
1112 f, fnode = dline.split('\0')
1113 fnode = bin(fnode[:40])
1114 f = changedfiles.get(f, None)
1115 # And if the file is in the list of files we care
1116 # about.
1117 if f is not None:
1118 # Get the changenode this manifest belongs to
1119 clnode = msng_mnfst_set[mnfstnode]
1120 # Create the set of filenodes for the file if
1121 # there isn't one already.
1122 ndset = msng_filenode_set.setdefault(f, {})
1123 # And set the filenode's changelog node to the
1124 # manifest's if it hasn't been set already.
1125 ndset.setdefault(fnode, clnode)
1126 else:
1127 # Otherwise we need a full manifest.
1128 m = mnfst.read(mnfstnode)
1129 # For every file we care about.
1130 for f in changedfiles:
1131 fnode = m.get(f, None)
1132 # If it's in the manifest
1133 if fnode is not None:
1134 # See comments above.
1135 clnode = msng_mnfst_set[mnfstnode]
1136 ndset = msng_filenode_set.setdefault(f, {})
1137 ndset.setdefault(fnode, clnode)
1138 # Remember the revision we hope to see next.
1139 next_rev[0] = r + 1
1140 return collect_msng_filenodes
1141
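Each manifest line has the form "<path>\0<40 hex digits>[x]\n" (a trailing "x" marks an executable file), which is why the delta scan above can split every surviving line on NUL and take the first 40 characters as the filenode. Parsing a fake two-line delta:

import binascii

delta = "foo/bar.c\x00" + "ab" * 20 + "\n" \
        "tool.sh\x00" + "cd" * 20 + "x\n"   # trailing 'x' = exec bit

for dline in delta.splitlines():
    f, fnode = dline.split('\x00')
    node = binascii.unhexlify(fnode[:40])   # what bin() does here
    print(f, len(node), fnode[40:] == 'x')  # path, 20-byte node, exec?
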
1142 # We have a list of filenodes we think we need for a file; let's remove
1143 # all those we know the recipient must have.
1144 def prune_filenodes(f, filerevlog):
1145 msngset = msng_filenode_set[f]
1146 hasset = {}
1147 # If a 'missing' filenode thinks it belongs to a changenode we
1148 # assume the recipient must have, then the recipient must have
1149 # that filenode.
1150 for n in msngset:
1151 clnode = cl.node(filerevlog.linkrev(n))
1152 if clnode in has_cl_set:
1153 hasset[n] = 1
1154 prune_parents(filerevlog, hasset, msngset)
1155
1156 # A function generating function that sets up a context for the
1157 # inner function.
1158 def lookup_filenode_link_func(fname):
1159 msngset = msng_filenode_set[fname]
1160 # Lookup the changenode the filenode belongs to.
1161 def lookup_filenode_link(fnode):
1162 return msngset[fnode]
1163 return lookup_filenode_link
1164
1165 # Now that we have all these utility functions to help out and
1166 # logically divide up the task, generate the group.
1167 def gengroup():
1168 # The set of changed files starts empty.
1169 changedfiles = {}
1170 # Create a changenode group generator that will call our functions
1171 # back to lookup the owning changenode and collect information.
1172 group = cl.group(msng_cl_lst, identity,
1173 manifest_and_file_collector(changedfiles))
1174 for chnk in group:
1175 yield chnk
1176
1177 # The list of manifests has been collected by the generator
1178 # calling our functions back.
1179 prune_manifests()
1180 msng_mnfst_lst = msng_mnfst_set.keys()
1181 # Sort the manifestnodes by revision number.
1182 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1183 # Create a generator for the manifestnodes that calls our lookup
1184 # and data collection functions back.
1185 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1186 filenode_collector(changedfiles))
1187 for chnk in group:
1188 yield chnk
1189
1190 # These are no longer needed, dereference and toss the memory for
1191 # them.
1192 msng_mnfst_lst = None
1193 msng_mnfst_set.clear()
1194
1195 changedfiles = changedfiles.keys()
1196 changedfiles.sort()
1197 # Go through all our files in order sorted by name.
1198 for fname in changedfiles:
1199 filerevlog = self.file(fname)
1200 # Toss out the filenodes that the recipient isn't really
1201 # missing.
1202 prune_filenodes(fname, filerevlog)
1203 msng_filenode_lst = msng_filenode_set[fname].keys()
1204 # If any filenodes are left, generate the group for them,
1205 # otherwise don't bother.
1206 if len(msng_filenode_lst) > 0:
1207 yield struct.pack(">l", len(fname) + 4) + fname
1208 # Sort the filenodes by their revision #
1209 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1210 # Create a group generator and only pass in a changenode
1211 # lookup function as we need to collect no information
1212 # from filenodes.
1213 group = filerevlog.group(msng_filenode_lst,
1214 lookup_filenode_link_func(fname))
1215 for chnk in group:
1216 yield chnk
1217 # Don't need this anymore, toss it to free memory.
1218 del msng_filenode_set[fname]
1219 # Signal that no more groups are left.
1220 yield struct.pack(">l", 0)
1221
1222 return util.chunkbuffer(gengroup())
1223
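Everything on the wire uses one framing, visible in the struct.pack calls above and in getchunk further down: a 4-byte big-endian length that counts itself, then the payload; any length of 4 or less terminates the current group. A toy writer/reader pair over an in-memory stream:

import struct
from io import BytesIO

def writechunk(out, data):
    # length prefix includes its own 4 bytes
    out.write(struct.pack(">l", len(data) + 4) + data)

out = BytesIO()
for payload in [b"chunk one", b"chunk two"]:
    writechunk(out, payload)
out.write(struct.pack(">l", 0))         # group terminator

src = BytesIO(out.getvalue())
while True:
    l = struct.unpack(">l", src.read(4))[0]
    if l <= 4:
        break                           # end of group
    print(src.read(l - 4))              # b'chunk one', b'chunk two'
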
1224 def changegroup(self, basenodes):
1225 """Generate a changegroup of all nodes that we have that a recipient
1226 doesn't.
1227
1228 This is much easier than the previous function as we can assume that
1229 the recipient has any changenode we aren't sending them."""
1230 cl = self.changelog
1231 nodes = cl.nodesbetween(basenodes, None)[0]
1232 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1233
1234 def identity(x):
1235 return x
1236
1237 def gennodelst(revlog):
1238 for r in xrange(0, revlog.count()):
1239 n = revlog.node(r)
1240 if revlog.linkrev(n) in revset:
1241 yield n
1242
1243 def changed_file_collector(changedfileset):
1244 def collect_changed_files(clnode):
1245 c = cl.read(clnode)
1246 for fname in c[3]:
1247 changedfileset[fname] = 1
1248 return collect_changed_files
1249
1250 def lookuprevlink_func(revlog):
1251 def lookuprevlink(n):
1252 return cl.node(revlog.linkrev(n))
1253 return lookuprevlink
1254
1255 def gengroup():
1256 # construct a list of all changed files
1257 changedfiles = {}
1258
1259 for chnk in cl.group(nodes, identity,
1260 changed_file_collector(changedfiles)):
1261 yield chnk
1262 changedfiles = changedfiles.keys()
1263 changedfiles.sort()
1264
1265 mnfst = self.manifest
1266 nodeiter = gennodelst(mnfst)
1267 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1268 yield chnk
1269
1270 for fname in changedfiles:
1271 filerevlog = self.file(fname)
1272 nodeiter = gennodelst(filerevlog)
1273 nodeiter = list(nodeiter)
1274 if nodeiter:
1275 yield struct.pack(">l", len(fname) + 4) + fname
1276 lookup = lookuprevlink_func(filerevlog)
1277 for chnk in filerevlog.group(nodeiter, lookup):
1278 yield chnk
1279
1280 yield struct.pack(">l", 0)
1281
1282 return util.chunkbuffer(gengroup())
1283
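gennodelst's filter in miniature: a revlog revision travels only if its linkrev, the changelog revision that introduced it, falls in the outgoing set (toy numbers):

linkrevs = {0: 0, 1: 0, 2: 3, 3: 5}     # filelog rev -> changelog rev
revset = {3, 4, 5}                      # changelog revs being sent
print([r for r in sorted(linkrevs) if linkrevs[r] in revset])   # [2, 3]
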
1284 def addchangegroup(self, source):
1285
1286 def getchunk():
1287 d = source.read(4)
1288 if not d: return ""
1289 l = struct.unpack(">l", d)[0]
1290 if l <= 4: return ""
1291 d = source.read(l - 4)
1292 if len(d) < l - 4:
1293 raise repo.RepoError(_("premature EOF reading chunk"
1294 " (got %d bytes, expected %d)")
1295 % (len(d), l - 4))
1296 return d
1297
1298 def getgroup():
1299 while 1:
1300 c = getchunk()
1301 if not c: break
1302 yield c
1303
1304 def csmap(x):
1305 self.ui.debug(_("add changeset %s\n") % short(x))
1306 return self.changelog.count()
1307
1308 def revmap(x):
1309 return self.changelog.rev(x)
1310
1311 if not source: return
1312 changesets = files = revisions = 0
1313
1314 tr = self.transaction()
1315
1316 oldheads = len(self.changelog.heads())
1317
1318 # pull off the changeset group
1319 self.ui.status(_("adding changesets\n"))
1320 co = self.changelog.tip()
1321 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1322 cnr, cor = map(self.changelog.rev, (cn, co))
1323 if cn == nullid:
1324 cnr = cor
1325 changesets = cnr - cor
1326
1327 # pull off the manifest group
1328 self.ui.status(_("adding manifests\n"))
1329 mm = self.manifest.tip()
1330 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1331
1332 # process the files
1333 self.ui.status(_("adding file changes\n"))
1334 while 1:
1335 f = getchunk()
1336 if not f: break
1337 self.ui.debug(_("adding %s revisions\n") % f)
1338 fl = self.file(f)
1339 o = fl.count()
1340 n = fl.addgroup(getgroup(), revmap, tr)
1341 revisions += fl.count() - o
1342 files += 1
1343
1344 newheads = len(self.changelog.heads())
1345 heads = ""
1346 if oldheads and newheads > oldheads:
1347 heads = _(" (+%d heads)") % (newheads - oldheads)
1348
1349 self.ui.status(_("added %d changesets"
1350 " with %d changes to %d files%s\n")
1351 % (changesets, revisions, files, heads))
1352
1353 tr.close()
1354
1355 if changesets > 0:
1356 if not self.hook("changegroup",
1357 node=hex(self.changelog.node(cor+1))):
1358 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1359 return 1
1360
1361 for i in range(cor + 1, cnr + 1):
1362 self.hook("commit", node=hex(self.changelog.node(i)))
1363
1364 return
1365
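A note on the accounting above: changelog revision numbers are dense, so the new changesets are exactly the revisions in (cor, cnr], which gives both the "added %d changesets" count and the range for the per-changeset "commit" hook:

cor, cnr = 41, 45                       # old tip rev, new tip rev
changesets = cnr - cor                  # 4 new changesets
for i in range(cor + 1, cnr + 1):
    print("commit hook for rev", i)     # revs 42..45
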
1366 def update(self, node, allow=False, force=False, choose=None,
1367 moddirstate=True):
1368 pl = self.dirstate.parents()
1369 if not force and pl[1] != nullid:
1370 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1371 return 1
1372
1373 p1, p2 = pl[0], node
1374 pa = self.changelog.ancestor(p1, p2)
1375 m1n = self.changelog.read(p1)[0]
1376 m2n = self.changelog.read(p2)[0]
1377 man = self.manifest.ancestor(m1n, m2n)
1378 m1 = self.manifest.read(m1n)
1379 mf1 = self.manifest.readflags(m1n)
1380 m2 = self.manifest.read(m2n)
1381 mf2 = self.manifest.readflags(m2n)
1382 ma = self.manifest.read(man)
1383 mfa = self.manifest.readflags(man)
1384
1385 (c, a, d, u) = self.changes()
1386
1387 # is this a jump, or a merge? i.e. is there a linear path
1388 # from p1 to p2?
1389 linear_path = (pa == p1 or pa == p2)
1390
1391 # resolve the manifest to determine which files
1392 # we care about merging
1393 self.ui.note(_("resolving manifests\n"))
1394 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1395 (force, allow, moddirstate, linear_path))
1396 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1397 (short(man), short(m1n), short(m2n)))
1398
1399 merge = {}
1400 get = {}
1401 remove = []
1402
1403 # construct a working dir manifest
1404 mw = m1.copy()
1405 mfw = mf1.copy()
1406 umap = dict.fromkeys(u)
1407
1408 for f in a + c + u:
1409 mw[f] = ""
1410 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1411
1412 if moddirstate:
1413 wlock = self.wlock()
1414
1415 for f in d:
1416 if f in mw: del mw[f]
1417
1418 # If we're jumping between revisions (as opposed to merging),
1419 # and if neither the working directory nor the target rev has
1420 # the file, then we need to remove it from the dirstate, to
1421 # prevent the dirstate from listing the file when it is no
1422 # longer in the manifest.
1423 if moddirstate and linear_path and f not in m2:
1424 self.dirstate.forget((f,))
1425
1426 # Compare manifests
1427 for f, n in mw.iteritems():
1428 if choose and not choose(f): continue
1429 if f in m2:
1430 s = 0
1431
1432 # is the wfile new since m1, and match m2?
1433 if f not in m1:
1434 t1 = self.wread(f)
1435 t2 = self.file(f).read(m2[f])
1436 if cmp(t1, t2) == 0:
1437 n = m2[f]
1438 del t1, t2
1439
1440 # are files different?
1441 if n != m2[f]:
1442 a = ma.get(f, nullid)
1443 # are both different from the ancestor?
1444 if n != a and m2[f] != a:
1445 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1446 # merge executable bits
1447 # "if we changed or they changed, change in merge"
1448 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1449 mode = ((a^b) | (a^c)) ^ a
1450 merge[f] = (m1.get(f, nullid), m2[f], mode)
1451 s = 1
1452 # are we clobbering?
1453 # is remote's version newer?
1454 # or are we going back in time?
1455 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1456 self.ui.debug(_(" remote %s is newer, get\n") % f)
1457 get[f] = m2[f]
1458 s = 1
1459 elif f in umap:
1460 # this unknown file is the same as the checkout
1461 get[f] = m2[f]
1462
1463 if not s and mfw[f] != mf2[f]:
1464 if force:
1465 self.ui.debug(_(" updating permissions for %s\n") % f)
1466 util.set_exec(self.wjoin(f), mf2[f])
1467 else:
1468 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1469 mode = ((a^b) | (a^c)) ^ a
1470 if mode != b:
1471 self.ui.debug(_(" updating permissions for %s\n") % f)
1472 util.set_exec(self.wjoin(f), mode)
1473 del m2[f]
1474 elif f in ma:
1475 if n != ma[f]:
1476 r = _("d")
1477 if not force and (linear_path or allow):
1478 r = self.ui.prompt(
1479 (_(" local changed %s which remote deleted\n") % f) +
1480 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1481 if r == _("d"):
1482 remove.append(f)
1483 else:
1484 self.ui.debug(_("other deleted %s\n") % f)
1485 remove.append(f) # other deleted it
1486 else:
1487 # file is created on branch or in working directory
1488 if force and f not in umap:
1489 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1490 remove.append(f)
1491 elif n == m1.get(f, nullid): # same as parent
1492 if p2 == pa: # going backwards?
1493 self.ui.debug(_("remote deleted %s\n") % f)
1494 remove.append(f)
1495 else:
1496 self.ui.debug(_("local modified %s, keeping\n") % f)
1497 else:
1498 self.ui.debug(_("working dir created %s, keeping\n") % f)
1499
1500 for f, n in m2.iteritems():
1501 if choose and not choose(f): continue
1502 if f[0] == "/": continue
1503 if f in ma and n != ma[f]:
1504 r = _("k")
1505 if not force and (linear_path or allow):
1506 r = self.ui.prompt(
1507 (_("remote changed %s which local deleted\n") % f) +
1508 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1509 if r == _("k"): get[f] = n
1510 elif f not in ma:
1511 self.ui.debug(_("remote created %s\n") % f)
1512 get[f] = n
1513 else:
1514 if force or p2 == pa: # going backwards?
1515 self.ui.debug(_("local deleted %s, recreating\n") % f)
1516 get[f] = n
1517 else:
1518 self.ui.debug(_("local deleted %s\n") % f)
1519
1520 del mw, m1, m2, ma
1521
1522 if force:
1523 for f in merge:
1524 get[f] = merge[f][1]
1525 merge = {}
1526
1527 if linear_path or force:
1528 # we don't need to do any magic, just jump to the new rev
1529 branch_merge = False
1530 p1, p2 = p2, nullid
1531 else:
1532 if not allow:
1533 self.ui.status(_("this update spans a branch"
1534 " affecting the following files:\n"))
1535 fl = merge.keys() + get.keys()
1536 fl.sort()
1537 for f in fl:
1538 cf = ""
1539 if f in merge: cf = _(" (resolve)")
1540 self.ui.status(" %s%s\n" % (f, cf))
1541 self.ui.warn(_("aborting update spanning branches!\n"))
1542 self.ui.status(_("(use update -m to merge across branches"
1543 " or -C to lose changes)\n"))
1544 return 1
1545 branch_merge = True
1546
1547 # get the files we don't need to change
1548 files = get.keys()
1549 files.sort()
1550 for f in files:
1551 if f[0] == "/": continue
1552 self.ui.note(_("getting %s\n") % f)
1553 t = self.file(f).read(get[f])
1554 self.wwrite(f, t)
1555 util.set_exec(self.wjoin(f), mf2[f])
1556 if moddirstate:
1557 if branch_merge:
1558 self.dirstate.update([f], 'n', st_mtime=-1)
1559 else:
1560 self.dirstate.update([f], 'n')
1561
1562 # merge the tricky bits
1563 files = merge.keys()
1564 files.sort()
1565 for f in files:
1566 self.ui.status(_("merging %s\n") % f)
1567 my, other, flag = merge[f]
1568 self.merge3(f, my, other)
1569 util.set_exec(self.wjoin(f), flag)
1570 if moddirstate:
1571 if branch_merge:
1572 # We've done a branch merge, mark this file as merged
1573 # so that we properly record the merger later
1574 self.dirstate.update([f], 'm')
1575 else:
1576 # We've update-merged a locally modified file, so
1577 # we set the dirstate to emulate a normal checkout
1578 # of that file some time in the past. Thus our
1579 # merge will appear as a normal local file
1580 # modification.
1581 f_len = len(self.file(f).read(other))
1582 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1583
1584 remove.sort()
1585 for f in remove:
1586 self.ui.note(_("removing %s\n") % f)
1587 try:
1588 util.unlink(self.wjoin(f))
1589 except OSError, inst:
1590 if inst.errno != errno.ENOENT:
1591 self.ui.warn(_("update failed to remove %s: %s!\n") %
1592 (f, inst.strerror))
1593 if moddirstate:
1594 if branch_merge:
1595 self.dirstate.update(remove, 'r')
1596 else:
1597 self.dirstate.forget(remove)
1598
1599 if moddirstate:
1600 self.dirstate.setparents(p1, p2)
1601
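The exec-bit rule used twice in update above, mode = ((a^b) | (a^c)) ^ a with a = ancestor, b = working copy, c = remote, implements the comment "if we changed or they changed, change in merge". Enumerating all eight bit patterns shows the changed side always wins:

for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            mode = ((a ^ b) | (a ^ c)) ^ a
            print(a, b, c, '->', mode)
# When b == c the result is simply b; when exactly one side changed
# relative to the ancestor, the changed side wins; when both changed
# (b == c != a), the shared new value wins.
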
1602 def merge3(self, fn, my, other):
1603 """perform a 3-way merge in the working directory"""
1604
1605 def temp(prefix, node):
1606 pre = "%s~%s." % (os.path.basename(fn), prefix)
1607 (fd, name) = tempfile.mkstemp("", pre)
1608 f = os.fdopen(fd, "wb")
1609 self.wwrite(fn, fl.read(node), f)
1610 f.close()
1611 return name
1612
1613 fl = self.file(fn)
1614 base = fl.ancestor(my, other)
1615 a = self.wjoin(fn)
1616 b = temp("base", base)
1617 c = temp("other", other)
1618
1619 self.ui.note(_("resolving %s\n") % fn)
1620 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1621 (fn, short(my), short(other), short(base)))
1622
1623 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1624 or "hgmerge")
1625 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1626 if r:
1627 self.ui.warn(_("merging %s failed!\n") % fn)
1628
1629 os.unlink(b)
1630 os.unlink(c)
1631
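merge3 hands the external tool three paths (the working file, a temp copy of the base, a temp copy of the other parent) and trusts its exit status; the result is expected to land in the working file. A hedged sketch of that contract using subprocess instead of os.system, which sidesteps the shell quoting in the original; "hgmerge" stands for whatever HGMERGE or ui.merge resolves to:

import subprocess

def run_merge_tool(local, base, other, cmd="hgmerge"):
    # The tool merges `local` and `other` against `base`, writes the
    # result back into `local`, and exits 0 on success.
    return subprocess.call([cmd, local, base, other]) == 0

# hypothetical usage:
# ok = run_merge_tool("file.txt", "/tmp/file.txt~base.XXXX",
#                     "/tmp/file.txt~other.XXXX")
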
1632 def verify(self):
1632 def verify(self):
1633 filelinkrevs = {}
1633 filelinkrevs = {}
1634 filenodes = {}
1634 filenodes = {}
1635 changesets = revisions = files = 0
1635 changesets = revisions = files = 0
1636 errors = [0]
1636 errors = [0]
1637 neededmanifests = {}
1637 neededmanifests = {}
1638
1638
1639 def err(msg):
1639 def err(msg):
1640 self.ui.warn(msg + "\n")
1640 self.ui.warn(msg + "\n")
1641 errors[0] += 1
1641 errors[0] += 1
1642
1642
1643 seen = {}
1643 seen = {}
1644 self.ui.status(_("checking changesets\n"))
1644 self.ui.status(_("checking changesets\n"))
1645 d = self.changelog.checksize()
1645 d = self.changelog.checksize()
1646 if d:
1646 if d:
1647 err(_("changeset data short %d bytes") % d)
1647 err(_("changeset data short %d bytes") % d)
1648 for i in range(self.changelog.count()):
1648 for i in range(self.changelog.count()):
1649 changesets += 1
1649 changesets += 1
1650 n = self.changelog.node(i)
1650 n = self.changelog.node(i)
1651 l = self.changelog.linkrev(n)
1651 l = self.changelog.linkrev(n)
1652 if l != i:
1652 if l != i:
1653 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1653 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1654 if n in seen:
1654 if n in seen:
1655 err(_("duplicate changeset at revision %d") % i)
1655 err(_("duplicate changeset at revision %d") % i)
1656 seen[n] = 1
1656 seen[n] = 1
1657
1657
1658 for p in self.changelog.parents(n):
1658 for p in self.changelog.parents(n):
1659 if p not in self.changelog.nodemap:
1659 if p not in self.changelog.nodemap:
1660 err(_("changeset %s has unknown parent %s") %
1660 err(_("changeset %s has unknown parent %s") %
1661 (short(n), short(p)))
1661 (short(n), short(p)))
1662 try:
1662 try:
1663 changes = self.changelog.read(n)
1663 changes = self.changelog.read(n)
1664 except KeyboardInterrupt:
1664 except KeyboardInterrupt:
1665 self.ui.warn(_("interrupted"))
1665 self.ui.warn(_("interrupted"))
1666 raise
1666 raise
1667 except Exception, inst:
1667 except Exception, inst:
1668 err(_("unpacking changeset %s: %s") % (short(n), inst))
1668 err(_("unpacking changeset %s: %s") % (short(n), inst))
1669
1669
1670 neededmanifests[changes[0]] = n
1670 neededmanifests[changes[0]] = n
1671
1671
1672 for f in changes[3]:
1672 for f in changes[3]:
1673 filelinkrevs.setdefault(f, []).append(i)
1673 filelinkrevs.setdefault(f, []).append(i)
1674
1674
1675 seen = {}
1675 seen = {}
1676 self.ui.status(_("checking manifests\n"))
1676 self.ui.status(_("checking manifests\n"))
1677 d = self.manifest.checksize()
1677 d = self.manifest.checksize()
1678 if d:
1678 if d:
1679 err(_("manifest data short %d bytes") % d)
1679 err(_("manifest data short %d bytes") % d)
1680 for i in range(self.manifest.count()):
1680 for i in range(self.manifest.count()):
1681 n = self.manifest.node(i)
1681 n = self.manifest.node(i)
1682 l = self.manifest.linkrev(n)
1682 l = self.manifest.linkrev(n)
1683
1683
1684 if l < 0 or l >= self.changelog.count():
1684 if l < 0 or l >= self.changelog.count():
1685 err(_("bad manifest link (%d) at revision %d") % (l, i))
1685 err(_("bad manifest link (%d) at revision %d") % (l, i))
1686
1686
1687 if n in neededmanifests:
1687 if n in neededmanifests:
1688 del neededmanifests[n]
1688 del neededmanifests[n]
1689
1689
1690 if n in seen:
1690 if n in seen:
1691 err(_("duplicate manifest at revision %d") % i)
1691 err(_("duplicate manifest at revision %d") % i)
1692
1692
1693 seen[n] = 1
1693 seen[n] = 1
1694
1694
1695 for p in self.manifest.parents(n):
1695 for p in self.manifest.parents(n):
1696 if p not in self.manifest.nodemap:
1696 if p not in self.manifest.nodemap:
1697 err(_("manifest %s has unknown parent %s") %
1697 err(_("manifest %s has unknown parent %s") %
1698 (short(n), short(p)))
1698 (short(n), short(p)))
1699
1699
1700 try:
1700 try:
1701 delta = mdiff.patchtext(self.manifest.delta(n))
1701 delta = mdiff.patchtext(self.manifest.delta(n))
1702 except KeyboardInterrupt:
1702 except KeyboardInterrupt:
1703 self.ui.warn(_("interrupted"))
1703 self.ui.warn(_("interrupted"))
1704 raise
1704 raise
1705 except Exception, inst:
1705 except Exception, inst:
1706 err(_("unpacking manifest %s: %s") % (short(n), inst))
1706 err(_("unpacking manifest %s: %s") % (short(n), inst))
1707
1707
1708 ff = [ l.split('\0') for l in delta.splitlines() ]
1708 ff = [ l.split('\0') for l in delta.splitlines() ]
1709 for f, fn in ff:
1709 for f, fn in ff:
1710 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1710 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1711
1711
1712 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1712 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1713
1713
1714 for m,c in neededmanifests.items():
1714 for m,c in neededmanifests.items():
1715 err(_("Changeset %s refers to unknown manifest %s") %
1715 err(_("Changeset %s refers to unknown manifest %s") %
1716 (short(m), short(c)))
1716 (short(m), short(c)))
1717 del neededmanifests
1717 del neededmanifests
1718
1718
1719 for f in filenodes:
1719 for f in filenodes:
1720 if f not in filelinkrevs:
1720 if f not in filelinkrevs:
1721 err(_("file %s in manifest but not in changesets") % f)
1721 err(_("file %s in manifest but not in changesets") % f)
1722
1722
1723 for f in filelinkrevs:
1723 for f in filelinkrevs:
1724 if f not in filenodes:
1724 if f not in filenodes:
1725 err(_("file %s in changeset but not in manifest") % f)
1725 err(_("file %s in changeset but not in manifest") % f)
1726
1726
1727 self.ui.status(_("checking files\n"))
1727 self.ui.status(_("checking files\n"))
1728 ff = filenodes.keys()
1728 ff = filenodes.keys()
1729 ff.sort()
1729 ff.sort()
1730 for f in ff:
1730 for f in ff:
1731 if f == "/dev/null": continue
1731 if f == "/dev/null": continue
1732 files += 1
1732 files += 1
1733 fl = self.file(f)
1733 fl = self.file(f)
1734 d = fl.checksize()
1734 d = fl.checksize()
1735 if d:
1735 if d:
1736 err(_("%s file data short %d bytes") % (f, d))
1736 err(_("%s file data short %d bytes") % (f, d))
1737
1737
1738 nodes = { nullid: 1 }
1738 nodes = { nullid: 1 }
1739 seen = {}
1739 seen = {}
1740 for i in range(fl.count()):
1740 for i in range(fl.count()):
1741 revisions += 1
1741 revisions += 1
1742 n = fl.node(i)
1742 n = fl.node(i)
1743
1743
1744 if n in seen:
1744 if n in seen:
1745 err(_("%s: duplicate revision %d") % (f, i))
1745 err(_("%s: duplicate revision %d") % (f, i))
1746 if n not in filenodes[f]:
1746 if n not in filenodes[f]:
1747 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1747 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1748 else:
1748 else:
1749 del filenodes[f][n]
1749 del filenodes[f][n]
1750
1750
1751 flr = fl.linkrev(n)
1751 flr = fl.linkrev(n)
1752 if flr not in filelinkrevs[f]:
1752 if flr not in filelinkrevs[f]:
1753 err(_("%s:%s points to unexpected changeset %d")
1753 err(_("%s:%s points to unexpected changeset %d")
1754 % (f, short(n), flr))
1754 % (f, short(n), flr))
1755 else:
1755 else:
1756 filelinkrevs[f].remove(flr)
1756 filelinkrevs[f].remove(flr)
1757
1757
1758 # verify contents
1758 # verify contents
1759 try:
1759 try:
1760 t = fl.read(n)
1760 t = fl.read(n)
1761 except KeyboardInterrupt:
1761 except KeyboardInterrupt:
1762 self.ui.warn(_("interrupted"))
1762 self.ui.warn(_("interrupted"))
1763 raise
1763 raise
1764 except Exception, inst:
1764 except Exception, inst:
1765 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1765 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1766
1766
1767 # verify parents
1767 # verify parents
1768 (p1, p2) = fl.parents(n)
1768 (p1, p2) = fl.parents(n)
1769 if p1 not in nodes:
1769 if p1 not in nodes:
1770 err(_("file %s:%s unknown parent 1 %s") %
1770 err(_("file %s:%s unknown parent 1 %s") %
1771 (f, short(n), short(p1)))
1771 (f, short(n), short(p1)))
1772 if p2 not in nodes:
1772 if p2 not in nodes:
1773 err(_("file %s:%s unknown parent 2 %s") %
1773 err(_("file %s:%s unknown parent 2 %s") %
1774 (f, short(n), short(p2)))
1774 (f, short(n), short(p2)))
1775 nodes[n] = 1
1775 nodes[n] = 1
1776
1776
1777 # cross-check
1777 # cross-check
1778 for node in filenodes[f]:
1778 for node in filenodes[f]:
1779 err(_("node %s in manifests not in %s") % (hex(node), f))
1779 err(_("node %s in manifests not in %s") % (hex(node), f))
1780
1780
1781 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1781 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1782 (files, changesets, revisions))
1782 (files, changesets, revisions))
1783
1783
1784 if errors[0]:
1784 if errors[0]:
1785 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1785 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1786 return 1
1786 return 1
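The crosscheck above boils down to a symmetric set difference between the file names collected from manifests (filenodes) and those collected from changelog entries (filelinkrevs). A toy illustration of that invariant, with made-up file names, not part of this changeset:

    filenodes = {"a.txt": {}, "b.txt": {}}       # files named by manifests
    filelinkrevs = {"a.txt": [0], "c.txt": [1]}  # files named by changesets

    # each side must cover the other, or verify reports an error
    assert [f for f in filenodes if f not in filelinkrevs] == ["b.txt"]
    assert [f for f in filelinkrevs if f not in filenodes] == ["c.txt"]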
@@ -1,173 +1,174 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct
8 import struct
9 from revlog import *
9 from revlog import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "bisect array")
12 demandload(globals(), "bisect array")
13
13
14 class manifest(revlog):
14 class manifest(revlog):
15 def __init__(self, opener):
15 def __init__(self, opener, local=True):
16 self.mapcache = None
16 self.mapcache = None
17 self.listcache = None
17 self.listcache = None
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
19 local=local)
19
20
20 def read(self, node):
21 def read(self, node):
21 if node == nullid: return {} # don't upset local cache
22 if node == nullid: return {} # don't upset local cache
22 if self.mapcache and self.mapcache[0] == node:
23 if self.mapcache and self.mapcache[0] == node:
23 return self.mapcache[1]
24 return self.mapcache[1]
24 text = self.revision(node)
25 text = self.revision(node)
25 map = {}
26 map = {}
26 flag = {}
27 flag = {}
27 self.listcache = array.array('c', text)
28 self.listcache = array.array('c', text)
28 lines = text.splitlines(1)
29 lines = text.splitlines(1)
29 for l in lines:
30 for l in lines:
30 (f, n) = l.split('\0')
31 (f, n) = l.split('\0')
31 map[f] = bin(n[:40])
32 map[f] = bin(n[:40])
32 flag[f] = (n[40:-1] == "x")
33 flag[f] = (n[40:-1] == "x")
33 self.mapcache = (node, map, flag)
34 self.mapcache = (node, map, flag)
34 return map
35 return map
35
36
36 def readflags(self, node):
37 def readflags(self, node):
37 if node == nullid: return {} # don't upset local cache
38 if node == nullid: return {} # don't upset local cache
38 if not self.mapcache or self.mapcache[0] != node:
39 if not self.mapcache or self.mapcache[0] != node:
39 self.read(node)
40 self.read(node)
40 return self.mapcache[2]
41 return self.mapcache[2]
41
42
42 def diff(self, a, b):
43 def diff(self, a, b):
43 return mdiff.textdiff(str(a), str(b))
44 return mdiff.textdiff(str(a), str(b))
44
45
45 def add(self, map, flags, transaction, link, p1=None, p2=None,
46 def add(self, map, flags, transaction, link, p1=None, p2=None,
46 changed=None):
47 changed=None):
47
48
48 # returns a tuple (start, end). If the string is found
49 # returns a tuple (start, end). If the string is found
49 # m[start:end] is the line containing that string. If start == end
50 # m[start:end] is the line containing that string. If start == end
50 # the string was not found and they indicate the proper sorted
51 # the string was not found and they indicate the proper sorted
51 # insertion point. This was taken from bisect_left, and modified
52 # insertion point. This was taken from bisect_left, and modified
52 # to find line start/end as it goes along.
53 # to find line start/end as it goes along.
53 #
54 #
54 # m should be a buffer or a string
55 # m should be a buffer or a string
55 # s is a string
56 # s is a string
56 #
57 #
57 def manifestsearch(m, s, lo=0, hi=None):
58 def manifestsearch(m, s, lo=0, hi=None):
58 def advance(i, c):
59 def advance(i, c):
59 while i < lenm and m[i] != c:
60 while i < lenm and m[i] != c:
60 i += 1
61 i += 1
61 return i
62 return i
62 lenm = len(m)
63 lenm = len(m)
63 if not hi:
64 if not hi:
64 hi = lenm
65 hi = lenm
65 while lo < hi:
66 while lo < hi:
66 mid = (lo + hi) // 2
67 mid = (lo + hi) // 2
67 start = mid
68 start = mid
68 while start > 0 and m[start-1] != '\n':
69 while start > 0 and m[start-1] != '\n':
69 start -= 1
70 start -= 1
70 end = advance(start, '\0')
71 end = advance(start, '\0')
71 if m[start:end] < s:
72 if m[start:end] < s:
72 # we know that after the null there are 40 bytes of sha1
73 # we know that after the null there are 40 bytes of sha1
73 # this translates to the bisect lo = mid + 1
74 # this translates to the bisect lo = mid + 1
74 lo = advance(end + 40, '\n') + 1
75 lo = advance(end + 40, '\n') + 1
75 else:
76 else:
76 # this translates to the bisect hi = mid
77 # this translates to the bisect hi = mid
77 hi = start
78 hi = start
78 end = advance(lo, '\0')
79 end = advance(lo, '\0')
79 found = m[lo:end]
80 found = m[lo:end]
80 if cmp(s, found) == 0:
81 if cmp(s, found) == 0:
81 # we know that after the null there are 40 bytes of sha1
82 # we know that after the null there are 40 bytes of sha1
82 end = advance(end + 40, '\n')
83 end = advance(end + 40, '\n')
83 return (lo, end+1)
84 return (lo, end+1)
84 else:
85 else:
85 return (lo, lo)
86 return (lo, lo)
86
87
87 # apply the changes collected during the bisect loop to our addlist
88 # apply the changes collected during the bisect loop to our addlist
88 # return a delta suitable for addrevision
89 # return a delta suitable for addrevision
89 def addlistdelta(addlist, x):
90 def addlistdelta(addlist, x):
90 # start from the bottom up
91 # start from the bottom up
91 # so changes to the offsets don't mess things up.
92 # so changes to the offsets don't mess things up.
92 i = len(x)
93 i = len(x)
93 while i > 0:
94 while i > 0:
94 i -= 1
95 i -= 1
95 start = x[i][0]
96 start = x[i][0]
96 end = x[i][1]
97 end = x[i][1]
97 if x[i][2]:
98 if x[i][2]:
98 addlist[start:end] = array.array('c', x[i][2])
99 addlist[start:end] = array.array('c', x[i][2])
99 else:
100 else:
100 del addlist[start:end]
101 del addlist[start:end]
101 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
102 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
102 for d in x ])
103 for d in x ])
103
104
104 # if we're using the listcache, make sure it is valid and
105 # if we're using the listcache, make sure it is valid and
105 # parented by the same node we're diffing against
106 # parented by the same node we're diffing against
106 if not changed or not self.listcache or not p1 or \
107 if not changed or not self.listcache or not p1 or \
107 self.mapcache[0] != p1:
108 self.mapcache[0] != p1:
108 files = map.keys()
109 files = map.keys()
109 files.sort()
110 files.sort()
110
111
111 text = ["%s\000%s%s\n" %
112 text = ["%s\000%s%s\n" %
112 (f, hex(map[f]), flags[f] and "x" or '')
113 (f, hex(map[f]), flags[f] and "x" or '')
113 for f in files]
114 for f in files]
114 self.listcache = array.array('c', "".join(text))
115 self.listcache = array.array('c', "".join(text))
115 cachedelta = None
116 cachedelta = None
116 else:
117 else:
117 addlist = self.listcache
118 addlist = self.listcache
118
119
119 # combine the changed lists into one list for sorting
120 # combine the changed lists into one list for sorting
120 work = [[x, 0] for x in changed[0]]
121 work = [[x, 0] for x in changed[0]]
121 work[len(work):] = [[x, 1] for x in changed[1]]
122 work[len(work):] = [[x, 1] for x in changed[1]]
122 work.sort()
123 work.sort()
123
124
124 delta = []
125 delta = []
125 dstart = None
126 dstart = None
126 dend = None
127 dend = None
127 dline = [""]
128 dline = [""]
128 start = 0
129 start = 0
129 # zero copy representation of addlist as a buffer
130 # zero copy representation of addlist as a buffer
130 addbuf = buffer(addlist)
131 addbuf = buffer(addlist)
131
132
132 # start with a readonly loop that finds the offset of
133 # start with a readonly loop that finds the offset of
133 # each line and creates the deltas
134 # each line and creates the deltas
134 for w in work:
135 for w in work:
135 f = w[0]
136 f = w[0]
136 # bs will either be the index of the item or the insert point
137 # bs will either be the index of the item or the insert point
137 start, end = manifestsearch(addbuf, f, start)
138 start, end = manifestsearch(addbuf, f, start)
138 if w[1] == 0:
139 if w[1] == 0:
139 l = "%s\000%s%s\n" % (f, hex(map[f]),
140 l = "%s\000%s%s\n" % (f, hex(map[f]),
140 flags[f] and "x" or '')
141 flags[f] and "x" or '')
141 else:
142 else:
142 l = ""
143 l = ""
143 if start == end and w[1] == 1:
144 if start == end and w[1] == 1:
144 # item we want to delete was not found, error out
145 # item we want to delete was not found, error out
145 raise AssertionError(
146 raise AssertionError(
146 _("failed to remove %s from manifest\n") % f)
147 _("failed to remove %s from manifest\n") % f)
147 if dstart != None and dstart <= start and dend >= start:
148 if dstart != None and dstart <= start and dend >= start:
148 if dend < end:
149 if dend < end:
149 dend = end
150 dend = end
150 if l:
151 if l:
151 dline.append(l)
152 dline.append(l)
152 else:
153 else:
153 if dstart != None:
154 if dstart != None:
154 delta.append([dstart, dend, "".join(dline)])
155 delta.append([dstart, dend, "".join(dline)])
155 dstart = start
156 dstart = start
156 dend = end
157 dend = end
157 dline = [l]
158 dline = [l]
158
159
159 if dstart != None:
160 if dstart != None:
160 delta.append([dstart, dend, "".join(dline)])
161 delta.append([dstart, dend, "".join(dline)])
161 # apply the delta to the addlist, and get a delta for addrevision
162 # apply the delta to the addlist, and get a delta for addrevision
162 cachedelta = addlistdelta(addlist, delta)
163 cachedelta = addlistdelta(addlist, delta)
163
164
164 # the delta is only valid if we've been processing the tip revision
165 # the delta is only valid if we've been processing the tip revision
165 if self.mapcache[0] != self.tip():
166 if self.mapcache[0] != self.tip():
166 cachedelta = None
167 cachedelta = None
167 self.listcache = addlist
168 self.listcache = addlist
168
169
169 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
170 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
170 p2, cachedelta)
171 p2, cachedelta)
171 self.mapcache = (n, map, flags)
172 self.mapcache = (n, map, flags)
172
173
173 return n
174 return n
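Everything in add() leans on the manifest text format: one sorted line per tracked file of the form "<file>\0<40-char hex nodeid>[x]\n", with a trailing "x" marking the executable flag. A small sketch of building and re-parsing one such line (the file name and nodeid are invented):

    line = "%s\000%s%s\n" % ("src/main.c", "ab" * 20, "x")
    f, n = line.split('\0')
    assert f == "src/main.c"
    assert n[:40] == "ab" * 20   # hex nodeid; read() unpacks it with bin()
    assert n[40:-1] == "x"       # executable flag, as readflags() expects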
@@ -1,893 +1,904 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
17
17
18 def hash(text, p1, p2):
18 def hash(text, p1, p2):
19 """generate a hash from the given text and its parent hashes
19 """generate a hash from the given text and its parent hashes
20
20
21 This hash combines both the current file contents and its history
21 This hash combines both the current file contents and its history
22 in a manner that makes it easy to distinguish nodes with the same
22 in a manner that makes it easy to distinguish nodes with the same
23 content in the revision graph.
23 content in the revision graph.
24 """
24 """
25 l = [p1, p2]
25 l = [p1, p2]
26 l.sort()
26 l.sort()
27 s = sha.new(l[0])
27 s = sha.new(l[0])
28 s.update(l[1])
28 s.update(l[1])
29 s.update(text)
29 s.update(text)
30 return s.digest()
30 return s.digest()
31
31
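Since the parents are sorted before being digested, the nodeid is insensitive to parent order; swapping p1 and p2 yields the same hash. A quick standalone sketch of that property (Python 2, using the same sha module):

    import sha

    def nodeid(text, p1, p2):
        l = [p1, p2]
        l.sort()
        s = sha.new(l[0])
        s.update(l[1])
        s.update(text)
        return s.digest()

    assert nodeid("data", "\x11" * 20, "\x22" * 20) == \
           nodeid("data", "\x22" * 20, "\x11" * 20)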
32 def compress(text):
32 def compress(text):
33 """ generate a possibly-compressed representation of text """
33 """ generate a possibly-compressed representation of text """
34 if not text: return ("", text)
34 if not text: return ("", text)
35 if len(text) < 44:
35 if len(text) < 44:
36 if text[0] == '\0': return ("", text)
36 if text[0] == '\0': return ("", text)
37 return ('u', text)
37 return ('u', text)
38 bin = zlib.compress(text)
38 bin = zlib.compress(text)
39 if len(bin) > len(text):
39 if len(bin) > len(text):
40 if text[0] == '\0': return ("", text)
40 if text[0] == '\0': return ("", text)
41 return ('u', text)
41 return ('u', text)
42 return ("", bin)
42 return ("", bin)
43
43
44 def decompress(bin):
44 def decompress(bin):
45 """ decompress the given input """
45 """ decompress the given input """
46 if not bin: return bin
46 if not bin: return bin
47 t = bin[0]
47 t = bin[0]
48 if t == '\0': return bin
48 if t == '\0': return bin
49 if t == 'x': return zlib.decompress(bin)
49 if t == 'x': return zlib.decompress(bin)
50 if t == 'u': return bin[1:]
50 if t == 'u': return bin[1:]
51 raise RevlogError(_("unknown compression type %s") % t)
51 raise RevlogError(_("unknown compression type %s") % t)
52
52
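The marker byte scheme works because a zlib stream always begins with 'x' (0x78): compressed chunks need no prefix, literal text is flagged with 'u', and text that already starts with a NUL is stored bare, since decompress() passes '\0' chunks through untouched. A round-trip sketch, assuming the two helpers above are in scope:

    def firstbyte(text):
        hdr, body = compress(text)
        stored = hdr + body
        assert decompress(stored) == text
        return stored[0]

    assert firstbyte("x" * 100) == 'x'    # zlib stream is self-marking
    assert firstbyte("tiny text") == 'u'  # literal, 'u' prefix added
    assert firstbyte("\0binary") == '\0'  # leading NUL kept as-is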
53 indexformat = ">4l20s20s20s"
53 indexformat = ">4l20s20s20s"
54
54
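Each index record is thus 76 bytes: four big-endian 32-bit integers (offset, size, base, linkrev) followed by three 20-byte binary nodeids (p1, p2, node). A sketch with made-up values:

    import struct

    assert struct.calcsize(indexformat) == 76
    e = (0, 123, 0, 7, "\0" * 20, "\0" * 20, "\x01" * 20)
    record = struct.pack(indexformat, *e)
    assert struct.unpack(indexformat, record)[6] == "\x01" * 20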
55 class lazyparser(object):
55 class lazyparser(object):
56 """
56 """
57 this class avoids the need to parse the entirety of large indices
57 this class avoids the need to parse the entirety of large indices
58
58
59 By default we parse and load 1000 entries at a time.
59 By default we parse and load 1000 entries at a time.
60
60
61 If no position is specified, we load the whole index, and replace
61 If no position is specified, we load the whole index, and replace
62 the lazy objects in revlog with the underlying objects for
62 the lazy objects in revlog with the underlying objects for
63 efficiency in cases where we look at most of the nodes.
63 efficiency in cases where we look at most of the nodes.
64 """
64 """
65 def __init__(self, data, revlog):
65 def __init__(self, data, revlog):
66 self.data = data
66 self.data = data
67 self.s = struct.calcsize(indexformat)
67 self.s = struct.calcsize(indexformat)
68 self.l = len(data)/self.s
68 self.l = len(data)/self.s
69 self.index = [None] * self.l
69 self.index = [None] * self.l
70 self.map = {nullid: -1}
70 self.map = {nullid: -1}
71 self.all = 0
71 self.all = 0
72 self.revlog = revlog
72 self.revlog = revlog
73
73
74 def trunc(self, pos):
74 def trunc(self, pos):
75 self.l = pos/self.s
75 self.l = pos/self.s
76
76
77 def load(self, pos=None):
77 def load(self, pos=None):
78 if self.all: return
78 if self.all: return
79 if pos is not None:
79 if pos is not None:
80 block = pos / 1000
80 block = pos / 1000
81 i = block * 1000
81 i = block * 1000
82 end = min(self.l, i + 1000)
82 end = min(self.l, i + 1000)
83 else:
83 else:
84 self.all = 1
84 self.all = 1
85 i = 0
85 i = 0
86 end = self.l
86 end = self.l
87 self.revlog.index = self.index
87 self.revlog.index = self.index
88 self.revlog.nodemap = self.map
88 self.revlog.nodemap = self.map
89
89
90 while i < end:
90 while i < end:
91 d = self.data[i * self.s: (i + 1) * self.s]
91 d = self.data[i * self.s: (i + 1) * self.s]
92 e = struct.unpack(indexformat, d)
92 e = struct.unpack(indexformat, d)
93 self.index[i] = e
93 self.index[i] = e
94 self.map[e[6]] = i
94 self.map[e[6]] = i
95 i += 1
95 i += 1
96
96
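The windowing arithmetic in load() is the whole trick: a request for entry pos unpacks only the thousand-record block containing it. A toy version of that block calculation (names are illustrative, not from the source):

    def blockrange(pos, total, blocksize=1000):
        i = (pos / blocksize) * blocksize  # Python 2 integer division
        return i, min(total, i + blocksize)

    assert blockrange(2345, 10000) == (2000, 3000)
    assert blockrange(9990, 9995) == (9000, 9995)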
97 class lazyindex(object):
97 class lazyindex(object):
98 """a lazy version of the index array"""
98 """a lazy version of the index array"""
99 def __init__(self, parser):
99 def __init__(self, parser):
100 self.p = parser
100 self.p = parser
101 def __len__(self):
101 def __len__(self):
102 return len(self.p.index)
102 return len(self.p.index)
103 def load(self, pos):
103 def load(self, pos):
104 if pos < 0:
104 if pos < 0:
105 pos += len(self.p.index)
105 pos += len(self.p.index)
106 self.p.load(pos)
106 self.p.load(pos)
107 return self.p.index[pos]
107 return self.p.index[pos]
108 def __getitem__(self, pos):
108 def __getitem__(self, pos):
109 return self.p.index[pos] or self.load(pos)
109 return self.p.index[pos] or self.load(pos)
110 def __delitem__(self, pos):
110 def __delitem__(self, pos):
111 del self.p.index[pos]
111 del self.p.index[pos]
112 def append(self, e):
112 def append(self, e):
113 self.p.index.append(e)
113 self.p.index.append(e)
114 def trunc(self, pos):
114 def trunc(self, pos):
115 self.p.trunc(pos)
115 self.p.trunc(pos)
116
116
117 class lazymap(object):
117 class lazymap(object):
118 """a lazy version of the node map"""
118 """a lazy version of the node map"""
119 def __init__(self, parser):
119 def __init__(self, parser):
120 self.p = parser
120 self.p = parser
121 def load(self, key):
121 def load(self, key):
122 if self.p.all: return
122 if self.p.all: return
123 n = self.p.data.find(key)
123 n = self.p.data.find(key)
124 if n < 0:
124 if n < 0:
125 raise KeyError(key)
125 raise KeyError(key)
126 pos = n / self.p.s
126 pos = n / self.p.s
127 self.p.load(pos)
127 self.p.load(pos)
128 def __contains__(self, key):
128 def __contains__(self, key):
129 self.p.load()
129 self.p.load()
130 return key in self.p.map
130 return key in self.p.map
131 def __iter__(self):
131 def __iter__(self):
132 yield nullid
132 yield nullid
133 for i in xrange(self.p.l):
133 for i in xrange(self.p.l):
134 try:
134 try:
135 yield self.p.index[i][6]
135 yield self.p.index[i][6]
136 except:
136 except:
137 self.p.load(i)
137 self.p.load(i)
138 yield self.p.index[i][6]
138 yield self.p.index[i][6]
139 def __getitem__(self, key):
139 def __getitem__(self, key):
140 try:
140 try:
141 return self.p.map[key]
141 return self.p.map[key]
142 except KeyError:
142 except KeyError:
143 try:
143 try:
144 self.load(key)
144 self.load(key)
145 return self.p.map[key]
145 return self.p.map[key]
146 except KeyError:
146 except KeyError:
147 raise KeyError("node " + hex(key))
147 raise KeyError("node " + hex(key))
148 def __setitem__(self, key, val):
148 def __setitem__(self, key, val):
149 self.p.map[key] = val
149 self.p.map[key] = val
150 def __delitem__(self, key):
150 def __delitem__(self, key):
151 del self.p.map[key]
151 del self.p.map[key]
152
152
153 class RevlogError(Exception): pass
153 class RevlogError(Exception): pass
154
154
155 class revlog(object):
155 class revlog(object):
156 """
156 """
157 the underlying revision storage object
157 the underlying revision storage object
158
158
159 A revlog consists of two parts, an index and the revision data.
159 A revlog consists of two parts, an index and the revision data.
160
160
161 The index is a file with a fixed record size containing
161 The index is a file with a fixed record size containing
162 information on each revision, including its nodeid (hash), the
162 information on each revision, including its nodeid (hash), the
163 nodeids of its parents, the position and offset of its data within
163 nodeids of its parents, the position and offset of its data within
164 the data file, and the revision it's based on. Finally, each entry
164 the data file, and the revision it's based on. Finally, each entry
165 contains a linkrev entry that can serve as a pointer to external
165 contains a linkrev entry that can serve as a pointer to external
166 data.
166 data.
167
167
168 The revision data itself is a linear collection of data chunks.
168 The revision data itself is a linear collection of data chunks.
169 Each chunk represents a revision and is usually represented as a
169 Each chunk represents a revision and is usually represented as a
170 delta against the previous chunk. To bound lookup time, runs of
170 delta against the previous chunk. To bound lookup time, runs of
171 deltas are limited to about 2 times the length of the original
171 deltas are limited to about 2 times the length of the original
172 version data. This makes retrieval of a version proportional to
172 version data. This makes retrieval of a version proportional to
173 its size, or O(1) relative to the number of revisions.
173 its size, or O(1) relative to the number of revisions.
174
174
175 Both pieces of the revlog are written to in an append-only
175 Both pieces of the revlog are written to in an append-only
176 fashion, which means we never need to rewrite a file to insert or
176 fashion, which means we never need to rewrite a file to insert or
177 remove data, and can use some simple techniques to avoid the need
177 remove data, and can use some simple techniques to avoid the need
178 for locking while reading.
178 for locking while reading.
179 """
179 """
180 def __init__(self, opener, indexfile, datafile):
180 def __init__(self, opener, indexfile, datafile, local=True):
181 """
181 """
182 create a revlog object
182 create a revlog object
183
183
184 opener is a function that abstracts the file opening operation
184 opener is a function that abstracts the file opening operation
185 and can be used to implement COW semantics or the like.
185 and can be used to implement COW semantics or the like.
186 """
186 """
187 self.indexfile = indexfile
187 self.indexfile = indexfile
188 self.datafile = datafile
188 self.datafile = datafile
189 self.opener = opener
189 self.opener = opener
190 self.cache = None
190 self.cache = None
191 self.local = local # XXX only needed because statichttp
191
192
192 try:
193 try:
193 i = self.opener(self.indexfile).read()
194 i = self.opener(self.indexfile).read()
194 except IOError, inst:
195 except IOError, inst:
195 if inst.errno != errno.ENOENT:
196 if inst.errno != errno.ENOENT:
196 raise
197 raise
197 i = ""
198 i = ""
198
199
199 if len(i) > 10000:
200 if len(i) > 10000:
200 # big index, let's parse it on demand
201 # big index, let's parse it on demand
201 parser = lazyparser(i, self)
202 parser = lazyparser(i, self)
202 self.index = lazyindex(parser)
203 self.index = lazyindex(parser)
203 self.nodemap = lazymap(parser)
204 self.nodemap = lazymap(parser)
204 else:
205 else:
205 s = struct.calcsize(indexformat)
206 s = struct.calcsize(indexformat)
206 l = len(i) / s
207 l = len(i) / s
207 self.index = [None] * l
208 self.index = [None] * l
208 m = [None] * l
209 m = [None] * l
209
210
210 n = 0
211 n = 0
211 for f in xrange(0, len(i), s):
212 for f in xrange(0, len(i), s):
212 # offset, size, base, linkrev, p1, p2, nodeid
213 # offset, size, base, linkrev, p1, p2, nodeid
213 e = struct.unpack(indexformat, i[f:f + s])
214 e = struct.unpack(indexformat, i[f:f + s])
214 m[n] = (e[6], n)
215 m[n] = (e[6], n)
215 self.index[n] = e
216 self.index[n] = e
216 n += 1
217 n += 1
217
218
218 self.nodemap = dict(m)
219 self.nodemap = dict(m)
219 self.nodemap[nullid] = -1
220 self.nodemap[nullid] = -1
220
221
221 def tip(self): return self.node(len(self.index) - 1)
222 def tip(self): return self.node(len(self.index) - 1)
222 def count(self): return len(self.index)
223 def count(self): return len(self.index)
223 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
224 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
224 def rev(self, node):
225 def rev(self, node):
225 try:
226 try:
226 return self.nodemap[node]
227 return self.nodemap[node]
227 except KeyError:
228 except KeyError:
228 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
229 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
229 def linkrev(self, node): return self.index[self.rev(node)][3]
230 def linkrev(self, node): return self.index[self.rev(node)][3]
230 def parents(self, node):
231 def parents(self, node):
231 if node == nullid: return (nullid, nullid)
232 if node == nullid: return (nullid, nullid)
232 return self.index[self.rev(node)][4:6]
233 return self.index[self.rev(node)][4:6]
233
234
234 def start(self, rev): return self.index[rev][0]
235 def start(self, rev): return self.index[rev][0]
235 def length(self, rev): return self.index[rev][1]
236 def length(self, rev): return self.index[rev][1]
236 def end(self, rev): return self.start(rev) + self.length(rev)
237 def end(self, rev): return self.start(rev) + self.length(rev)
237 def base(self, rev): return self.index[rev][2]
238 def base(self, rev): return self.index[rev][2]
238
239
239 def reachable(self, rev, stop=None):
240 def reachable(self, rev, stop=None):
240 reachable = {}
241 reachable = {}
241 visit = [rev]
242 visit = [rev]
242 reachable[rev] = 1
243 reachable[rev] = 1
243 if stop:
244 if stop:
244 stopn = self.rev(stop)
245 stopn = self.rev(stop)
245 else:
246 else:
246 stopn = 0
247 stopn = 0
247 while visit:
248 while visit:
248 n = visit.pop(0)
249 n = visit.pop(0)
249 if n == stop:
250 if n == stop:
250 continue
251 continue
251 if n == nullid:
252 if n == nullid:
252 continue
253 continue
253 for p in self.parents(n):
254 for p in self.parents(n):
254 if self.rev(p) < stopn:
255 if self.rev(p) < stopn:
255 continue
256 continue
256 if p not in reachable:
257 if p not in reachable:
257 reachable[p] = 1
258 reachable[p] = 1
258 visit.append(p)
259 visit.append(p)
259 return reachable
260 return reachable
260
261
261 def nodesbetween(self, roots=None, heads=None):
262 def nodesbetween(self, roots=None, heads=None):
262 """Return a tuple containing three elements. Elements 1 and 2 contain
263 """Return a tuple containing three elements. Elements 1 and 2 contain
263 a final list of bases and heads after all the unreachable ones have been
264 a final list of bases and heads after all the unreachable ones have been
264 pruned. Element 0 contains a topologically sorted list of all
265 pruned. Element 0 contains a topologically sorted list of all
265
266
266 nodes that satisfy these constraints:
267 nodes that satisfy these constraints:
267 1. All nodes must be descended from a node in roots (the nodes on
268 1. All nodes must be descended from a node in roots (the nodes on
268 roots are considered descended from themselves).
269 roots are considered descended from themselves).
269 2. All nodes must also be ancestors of a node in heads (the nodes in
270 2. All nodes must also be ancestors of a node in heads (the nodes in
270 heads are considered to be their own ancestors).
271 heads are considered to be their own ancestors).
271
272
272 If roots is unspecified, nullid is assumed as the only root.
273 If roots is unspecified, nullid is assumed as the only root.
273 If heads is unspecified, it is taken to be the output of the
274 If heads is unspecified, it is taken to be the output of the
274 heads method (i.e. a list of all nodes in the repository that
275 heads method (i.e. a list of all nodes in the repository that
275 have no children)."""
276 have no children)."""
276 nonodes = ([], [], [])
277 nonodes = ([], [], [])
277 if roots is not None:
278 if roots is not None:
278 roots = list(roots)
279 roots = list(roots)
279 if not roots:
280 if not roots:
280 return nonodes
281 return nonodes
281 lowestrev = min([self.rev(n) for n in roots])
282 lowestrev = min([self.rev(n) for n in roots])
282 else:
283 else:
283 roots = [nullid] # Everybody's a descendent of nullid
284 roots = [nullid] # Everybody's a descendent of nullid
284 lowestrev = -1
285 lowestrev = -1
285 if (lowestrev == -1) and (heads is None):
286 if (lowestrev == -1) and (heads is None):
286 # We want _all_ the nodes!
287 # We want _all_ the nodes!
287 return ([self.node(r) for r in xrange(0, self.count())],
288 return ([self.node(r) for r in xrange(0, self.count())],
288 [nullid], list(self.heads()))
289 [nullid], list(self.heads()))
289 if heads is None:
290 if heads is None:
290 # All nodes are ancestors, so the latest ancestor is the last
291 # All nodes are ancestors, so the latest ancestor is the last
291 # node.
292 # node.
292 highestrev = self.count() - 1
293 highestrev = self.count() - 1
293 # Set ancestors to None to signal that every node is an ancestor.
294 # Set ancestors to None to signal that every node is an ancestor.
294 ancestors = None
295 ancestors = None
295 # Set heads to an empty dictionary for later discovery of heads
296 # Set heads to an empty dictionary for later discovery of heads
296 heads = {}
297 heads = {}
297 else:
298 else:
298 heads = list(heads)
299 heads = list(heads)
299 if not heads:
300 if not heads:
300 return nonodes
301 return nonodes
301 ancestors = {}
302 ancestors = {}
302 # Start at the top and keep marking parents until we're done.
303 # Start at the top and keep marking parents until we're done.
303 nodestotag = heads[:]
304 nodestotag = heads[:]
304 # Turn heads into a dictionary so we can remove 'fake' heads.
305 # Turn heads into a dictionary so we can remove 'fake' heads.
305 # Also, later we will be using it to filter out the heads we can't
306 # Also, later we will be using it to filter out the heads we can't
306 # find from roots.
307 # find from roots.
307 heads = dict.fromkeys(heads, 0)
308 heads = dict.fromkeys(heads, 0)
308 # Remember where the top was so we can use it as a limit later.
309 # Remember where the top was so we can use it as a limit later.
309 highestrev = max([self.rev(n) for n in nodestotag])
310 highestrev = max([self.rev(n) for n in nodestotag])
310 while nodestotag:
311 while nodestotag:
311 # grab a node to tag
312 # grab a node to tag
312 n = nodestotag.pop()
313 n = nodestotag.pop()
313 # Never tag nullid
314 # Never tag nullid
314 if n == nullid:
315 if n == nullid:
315 continue
316 continue
316 # A node's revision number represents its place in a
317 # A node's revision number represents its place in a
317 # topologically sorted list of nodes.
318 # topologically sorted list of nodes.
318 r = self.rev(n)
319 r = self.rev(n)
319 if r >= lowestrev:
320 if r >= lowestrev:
320 if n not in ancestors:
321 if n not in ancestors:
321 # If we are possibly a descendent of one of the roots
322 # If we are possibly a descendent of one of the roots
322 # and we haven't already been marked as an ancestor
323 # and we haven't already been marked as an ancestor
323 ancestors[n] = 1 # Mark as ancestor
324 ancestors[n] = 1 # Mark as ancestor
324 # Add non-nullid parents to list of nodes to tag.
325 # Add non-nullid parents to list of nodes to tag.
325 nodestotag.extend([p for p in self.parents(n) if
326 nodestotag.extend([p for p in self.parents(n) if
326 p != nullid])
327 p != nullid])
327 elif n in heads: # We've seen it before, is it a fake head?
328 elif n in heads: # We've seen it before, is it a fake head?
328 # So it is, real heads should not be the ancestors of
329 # So it is, real heads should not be the ancestors of
329 # any other heads.
330 # any other heads.
330 heads.pop(n)
331 heads.pop(n)
331 if not ancestors:
332 if not ancestors:
332 return nonodes
333 return nonodes
333 # Now that we have our set of ancestors, we want to remove any
334 # Now that we have our set of ancestors, we want to remove any
334 # roots that are not ancestors.
335 # roots that are not ancestors.
335
336
336 # If one of the roots was nullid, everything is included anyway.
337 # If one of the roots was nullid, everything is included anyway.
337 if lowestrev > -1:
338 if lowestrev > -1:
338 # But, since we weren't, let's recompute the lowest rev to not
339 # But, since we weren't, let's recompute the lowest rev to not
339 # include roots that aren't ancestors.
340 # include roots that aren't ancestors.
340
341
341 # Filter out roots that aren't ancestors of heads
342 # Filter out roots that aren't ancestors of heads
342 roots = [n for n in roots if n in ancestors]
343 roots = [n for n in roots if n in ancestors]
343 # Recompute the lowest revision
344 # Recompute the lowest revision
344 if roots:
345 if roots:
345 lowestrev = min([self.rev(n) for n in roots])
346 lowestrev = min([self.rev(n) for n in roots])
346 else:
347 else:
347 # No more roots? Return empty list
348 # No more roots? Return empty list
348 return nonodes
349 return nonodes
349 else:
350 else:
350 # We are descending from nullid, and don't need to care about
351 # We are descending from nullid, and don't need to care about
351 # any other roots.
352 # any other roots.
352 lowestrev = -1
353 lowestrev = -1
353 roots = [nullid]
354 roots = [nullid]
354 # Transform our roots list into a 'set' (i.e. a dictionary where the
355 # Transform our roots list into a 'set' (i.e. a dictionary where the
355 # values don't matter.
356 # values don't matter.
356 descendents = dict.fromkeys(roots, 1)
357 descendents = dict.fromkeys(roots, 1)
357 # Also, keep the original roots so we can filter out roots that aren't
358 # Also, keep the original roots so we can filter out roots that aren't
358 # 'real' roots (i.e. are descended from other roots).
359 # 'real' roots (i.e. are descended from other roots).
359 roots = descendents.copy()
360 roots = descendents.copy()
360 # Our topologically sorted list of output nodes.
361 # Our topologically sorted list of output nodes.
361 orderedout = []
362 orderedout = []
362 # Don't start at nullid since we don't want nullid in our output list,
363 # Don't start at nullid since we don't want nullid in our output list,
363 # and if nullid shows up in descendents, empty parents will look like
364 # and if nullid shows up in descendents, empty parents will look like
364 # they're descendents.
365 # they're descendents.
365 for r in xrange(max(lowestrev, 0), highestrev + 1):
366 for r in xrange(max(lowestrev, 0), highestrev + 1):
366 n = self.node(r)
367 n = self.node(r)
367 isdescendent = False
368 isdescendent = False
368 if lowestrev == -1: # Everybody is a descendent of nullid
369 if lowestrev == -1: # Everybody is a descendent of nullid
369 isdescendent = True
370 isdescendent = True
370 elif n in descendents:
371 elif n in descendents:
371 # n is already a descendent
372 # n is already a descendent
372 isdescendent = True
373 isdescendent = True
373 # This check only needs to be done here because all the roots
374 # This check only needs to be done here because all the roots
374 # will start being marked as descendents before the loop.
375 # will start being marked as descendents before the loop.
375 if n in roots:
376 if n in roots:
376 # If n was a root, check if it's a 'real' root.
377 # If n was a root, check if it's a 'real' root.
377 p = tuple(self.parents(n))
378 p = tuple(self.parents(n))
378 # If any of its parents are descendents, it's not a root.
379 # If any of its parents are descendents, it's not a root.
379 if (p[0] in descendents) or (p[1] in descendents):
380 if (p[0] in descendents) or (p[1] in descendents):
380 roots.pop(n)
381 roots.pop(n)
381 else:
382 else:
382 p = tuple(self.parents(n))
383 p = tuple(self.parents(n))
383 # A node is a descendent if either of its parents are
384 # A node is a descendent if either of its parents are
384 # descendents. (We seeded the descendents list with the roots
385 # descendents. (We seeded the descendents list with the roots
385 # up there, remember?)
386 # up there, remember?)
386 if (p[0] in descendents) or (p[1] in descendents):
387 if (p[0] in descendents) or (p[1] in descendents):
387 descendents[n] = 1
388 descendents[n] = 1
388 isdescendent = True
389 isdescendent = True
389 if isdescendent and ((ancestors is None) or (n in ancestors)):
390 if isdescendent and ((ancestors is None) or (n in ancestors)):
390 # Only include nodes that are both descendents and ancestors.
391 # Only include nodes that are both descendents and ancestors.
391 orderedout.append(n)
392 orderedout.append(n)
392 if (ancestors is not None) and (n in heads):
393 if (ancestors is not None) and (n in heads):
393 # We're trying to figure out which heads are reachable
394 # We're trying to figure out which heads are reachable
394 # from roots.
395 # from roots.
395 # Mark this head as having been reached
396 # Mark this head as having been reached
396 heads[n] = 1
397 heads[n] = 1
397 elif ancestors is None:
398 elif ancestors is None:
398 # Otherwise, we're trying to discover the heads.
399 # Otherwise, we're trying to discover the heads.
399 # Assume this is a head because if it isn't, the next step
400 # Assume this is a head because if it isn't, the next step
400 # will eventually remove it.
401 # will eventually remove it.
401 heads[n] = 1
402 heads[n] = 1
402 # But, obviously its parents aren't.
403 # But, obviously its parents aren't.
403 for p in self.parents(n):
404 for p in self.parents(n):
404 heads.pop(p, None)
405 heads.pop(p, None)
405 heads = [n for n in heads.iterkeys() if heads[n] != 0]
406 heads = [n for n in heads.iterkeys() if heads[n] != 0]
406 roots = roots.keys()
407 roots = roots.keys()
407 assert orderedout
408 assert orderedout
408 assert roots
409 assert roots
409 assert heads
410 assert heads
410 return (orderedout, roots, heads)
411 return (orderedout, roots, heads)
411
412
412 def heads(self, start=None):
413 def heads(self, start=None):
413 """return the list of all nodes that have no children
414 """return the list of all nodes that have no children
414
415
415 if start is specified, only heads that are descendants of
416 if start is specified, only heads that are descendants of
416 start will be returned
417 start will be returned
417
418
418 """
419 """
419 if start is None:
420 if start is None:
420 start = nullid
421 start = nullid
421 reachable = {start: 1}
422 reachable = {start: 1}
422 heads = {start: 1}
423 heads = {start: 1}
423 startrev = self.rev(start)
424 startrev = self.rev(start)
424
425
425 for r in xrange(startrev + 1, self.count()):
426 for r in xrange(startrev + 1, self.count()):
426 n = self.node(r)
427 n = self.node(r)
427 for pn in self.parents(n):
428 for pn in self.parents(n):
428 if pn in reachable:
429 if pn in reachable:
429 reachable[n] = 1
430 reachable[n] = 1
430 heads[n] = 1
431 heads[n] = 1
431 if pn in heads:
432 if pn in heads:
432 del heads[pn]
433 del heads[pn]
433 return heads.keys()
434 return heads.keys()
434
435
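The sweep in heads() depends on revision numbers being topologically sorted: every node is visited after its parents, so evicting each reachable node's parents from the candidate set leaves exactly the childless nodes. A toy rendition on a four-node graph, not part of this change:

    def toyheads(parents, order, start):
        reachable, heads = {start: 1}, {start: 1}
        for n in order:              # parents always precede children
            for p in parents[n]:
                if p in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                    heads.pop(p, None)
        return heads.keys()

    parents = {"a": [], "b": ["a"], "c": ["a"], "d": ["b"]}
    assert sorted(toyheads(parents, "abcd", "a")) == ["c", "d"]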
435 def children(self, node):
436 def children(self, node):
436 """find the children of a given node"""
437 """find the children of a given node"""
437 c = []
438 c = []
438 p = self.rev(node)
439 p = self.rev(node)
439 for r in range(p + 1, self.count()):
440 for r in range(p + 1, self.count()):
440 n = self.node(r)
441 n = self.node(r)
441 for pn in self.parents(n):
442 for pn in self.parents(n):
442 if pn == node:
443 if pn == node:
443 c.append(n)
444 c.append(n)
444 continue
445 continue
445 elif pn == nullid:
446 elif pn == nullid:
446 continue
447 continue
447 return c
448 return c
448
449
449 def lookup(self, id):
450 def lookup(self, id):
450 """locate a node based on revision number or subset of hex nodeid"""
451 """locate a node based on revision number or subset of hex nodeid"""
451 try:
452 try:
452 rev = int(id)
453 rev = int(id)
453 if str(rev) != id: raise ValueError
454 if str(rev) != id: raise ValueError
454 if rev < 0: rev = self.count() + rev
455 if rev < 0: rev = self.count() + rev
455 if rev < 0 or rev >= self.count(): raise ValueError
456 if rev < 0 or rev >= self.count(): raise ValueError
456 return self.node(rev)
457 return self.node(rev)
457 except (ValueError, OverflowError):
458 except (ValueError, OverflowError):
458 c = []
459 c = []
459 for n in self.nodemap:
460 for n in self.nodemap:
460 if hex(n).startswith(id):
461 if hex(n).startswith(id):
461 c.append(n)
462 c.append(n)
462 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
463 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
463 if len(c) < 1: raise RevlogError(_("No match found"))
464 if len(c) < 1: raise RevlogError(_("No match found"))
464 return c[0]
465 return c[0]
465
466
466 return None
467 return None
467
468
468 def diff(self, a, b):
469 def diff(self, a, b):
469 """return a delta between two revisions"""
470 """return a delta between two revisions"""
470 return mdiff.textdiff(a, b)
471 return mdiff.textdiff(a, b)
471
472
472 def patches(self, t, pl):
473 def patches(self, t, pl):
473 """apply a list of patches to a string"""
474 """apply a list of patches to a string"""
474 return mdiff.patches(t, pl)
475 return mdiff.patches(t, pl)
475
476
476 def delta(self, node):
477 def delta(self, node):
477 """return or calculate a delta between a node and its predecessor"""
478 """return or calculate a delta between a node and its predecessor"""
478 r = self.rev(node)
479 r = self.rev(node)
479 b = self.base(r)
480 b = self.base(r)
480 if r == b:
481 if r == b:
481 return self.diff(self.revision(self.node(r - 1)),
482 return self.diff(self.revision(self.node(r - 1)),
482 self.revision(node))
483 self.revision(node))
483 else:
484 else:
484 f = self.opener(self.datafile)
485 f = self.opener(self.datafile)
485 f.seek(self.start(r))
486 f.seek(self.start(r))
486 data = f.read(self.length(r))
487 data = f.read(self.length(r))
487 return decompress(data)
488 return decompress(data)
488
489
489 def revision(self, node):
490 def revision(self, node):
490 """return an uncompressed revision of a given"""
491 """return an uncompressed revision of a given"""
491 if node == nullid: return ""
492 if node == nullid: return ""
492 if self.cache and self.cache[0] == node: return self.cache[2]
493 if self.cache and self.cache[0] == node: return self.cache[2]
493
494
494 # look up what we need to read
495 # look up what we need to read
495 text = None
496 text = None
496 rev = self.rev(node)
497 rev = self.rev(node)
497 start, length, base, link, p1, p2, node = self.index[rev]
498 start, length, base, link, p1, p2, node = self.index[rev]
498 end = start + length
499 end = start + length
499 if base != rev: start = self.start(base)
500 if base != rev: start = self.start(base)
500
501
501 # do we have useful data cached?
502 # do we have useful data cached?
502 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
503 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
503 base = self.cache[1]
504 base = self.cache[1]
504 start = self.start(base + 1)
505 start = self.start(base + 1)
505 text = self.cache[2]
506 text = self.cache[2]
506 last = 0
507 last = 0
507
508
508 f = self.opener(self.datafile)
509 f = self.opener(self.datafile)
509 f.seek(start)
510 f.seek(start)
510 data = f.read(end - start)
511 data = f.read(end - start)
511
512
512 if text is None:
513 if text is None:
513 last = self.length(base)
514 last = self.length(base)
514 text = decompress(data[:last])
515 text = decompress(data[:last])
515
516
516 bins = []
517 bins = []
517 for r in xrange(base + 1, rev + 1):
518 for r in xrange(base + 1, rev + 1):
518 s = self.length(r)
519 s = self.length(r)
519 bins.append(decompress(data[last:last + s]))
520 bins.append(decompress(data[last:last + s]))
520 last = last + s
521 last = last + s
521
522
522 text = mdiff.patches(text, bins)
523 text = mdiff.patches(text, bins)
523
524
524 if node != hash(text, p1, p2):
525 if node != hash(text, p1, p2):
525 raise RevlogError(_("integrity check failed on %s:%d")
526 raise RevlogError(_("integrity check failed on %s:%d")
526 % (self.datafile, rev))
527 % (self.datafile, rev))
527
528
528 self.cache = (node, rev, text)
529 self.cache = (node, rev, text)
529 return text
530 return text
530
531
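Stripped of caching and I/O, revision() is a fold over the delta chain: take the full text stored at base(rev), then patch every later chunk onto it in order. A minimal model of that loop, with the patch function (mdiff.patches here) supplied by the caller:

    def reconstruct(chunkat, base, rev, patch):
        text = chunkat(base)                  # full text stored at base
        bins = [chunkat(r) for r in xrange(base + 1, rev + 1)]
        if bins:
            text = patch(text, bins)          # e.g. mdiff.patches
        return text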
531 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
532 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
532 """add a revision to the log
533 """add a revision to the log
533
534
534 text - the revision data to add
535 text - the revision data to add
535 transaction - the transaction object used for rollback
536 transaction - the transaction object used for rollback
536 link - the linkrev data to add
537 link - the linkrev data to add
537 p1, p2 - the parent nodeids of the revision
538 p1, p2 - the parent nodeids of the revision
538 d - an optional precomputed delta
539 d - an optional precomputed delta
539 """
540 """
540 if text is None: text = ""
541 if text is None: text = ""
541 if p1 is None: p1 = self.tip()
542 if p1 is None: p1 = self.tip()
542 if p2 is None: p2 = nullid
543 if p2 is None: p2 = nullid
543
544
544 node = hash(text, p1, p2)
545 node = hash(text, p1, p2)
545
546
546 if node in self.nodemap:
547 if node in self.nodemap:
547 return node
548 return node
548
549
549 n = self.count()
550 n = self.count()
550 t = n - 1
551 t = n - 1
551
552
552 if n:
553 if n:
553 base = self.base(t)
554 base = self.base(t)
554 start = self.start(base)
555 start = self.start(base)
555 end = self.end(t)
556 end = self.end(t)
556 if not d:
557 if not d:
557 prev = self.revision(self.tip())
558 prev = self.revision(self.tip())
558 d = self.diff(prev, str(text))
559 d = self.diff(prev, str(text))
559 data = compress(d)
560 data = compress(d)
560 l = len(data[1]) + len(data[0])
561 l = len(data[1]) + len(data[0])
561 dist = end - start + l
562 dist = end - start + l
562
563
563 # full versions are inserted when the needed deltas
564 # full versions are inserted when the needed deltas
564 # become comparable to the uncompressed text
565 # become comparable to the uncompressed text
565 if not n or dist > len(text) * 2:
566 if not n or dist > len(text) * 2:
566 data = compress(text)
567 data = compress(text)
567 l = len(data[1]) + len(data[0])
568 l = len(data[1]) + len(data[0])
568 base = n
569 base = n
569 else:
570 else:
570 base = self.base(t)
571 base = self.base(t)
571
572
572 offset = 0
573 offset = 0
573 if t >= 0:
574 if t >= 0:
574 offset = self.end(t)
575 offset = self.end(t)
575
576
576 e = (offset, l, base, link, p1, p2, node)
577 e = (offset, l, base, link, p1, p2, node)
577
578
578 self.index.append(e)
579 self.index.append(e)
579 self.nodemap[node] = n
580 self.nodemap[node] = n
580 entry = struct.pack(indexformat, *e)
581 entry = struct.pack(indexformat, *e)
581
582
582 transaction.add(self.datafile, e[0])
583 transaction.add(self.datafile, e[0])
583 f = self.opener(self.datafile, "a")
584 f = self.opener(self.datafile, "a")
584 if data[0]:
585 if data[0]:
585 f.write(data[0])
586 f.write(data[0])
586 f.write(data[1])
587 f.write(data[1])
587 transaction.add(self.indexfile, n * len(entry))
588 transaction.add(self.indexfile, n * len(entry))
588 self.opener(self.indexfile, "a").write(entry)
589 self.opener(self.indexfile, "a").write(entry)
589
590
590 self.cache = (node, n, text)
591 self.cache = (node, n, text)
591 return node
592 return node
592
593
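The dist > len(text) * 2 test above is what bounds retrieval cost: dist measures the bytes spanned from the current base through the new delta, and once that exceeds twice the new full text, a snapshot is cheaper than replaying the chain. In sketch form, with illustrative names:

    def storage_choice(chain_span, delta_len, textlen):
        dist = chain_span + delta_len
        if dist > textlen * 2:
            return "full"    # start a new delta chain at this revision
        return "delta"

    assert storage_choice(190, 30, 100) == "full"
    assert storage_choice(100, 30, 100) == "delta"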
593 def ancestor(self, a, b):
594 def ancestor(self, a, b):
594 """calculate the least common ancestor of nodes a and b"""
595 """calculate the least common ancestor of nodes a and b"""
595 # calculate the distance of every node from root
596 # calculate the distance of every node from root
596 dist = {nullid: 0}
597 dist = {nullid: 0}
597 for i in xrange(self.count()):
598 for i in xrange(self.count()):
598 n = self.node(i)
599 n = self.node(i)
599 p1, p2 = self.parents(n)
600 p1, p2 = self.parents(n)
600 dist[n] = max(dist[p1], dist[p2]) + 1
601 dist[n] = max(dist[p1], dist[p2]) + 1
601
602
602 # traverse ancestors in order of decreasing distance from root
603 # traverse ancestors in order of decreasing distance from root
603 def ancestors(node):
604 def ancestors(node):
604 # we store negative distances because heap returns smallest member
605 # we store negative distances because heap returns smallest member
605 h = [(-dist[node], node)]
606 h = [(-dist[node], node)]
606 seen = {}
607 seen = {}
607 earliest = self.count()
608 earliest = self.count()
608 while h:
609 while h:
609 d, n = heapq.heappop(h)
610 d, n = heapq.heappop(h)
610 if n not in seen:
611 if n not in seen:
611 seen[n] = 1
612 seen[n] = 1
612 r = self.rev(n)
613 r = self.rev(n)
613 yield (-d, n)
614 yield (-d, n)
614 for p in self.parents(n):
615 for p in self.parents(n):
615 heapq.heappush(h, (-dist[p], p))
616 heapq.heappush(h, (-dist[p], p))
616
617
617 def generations(node):
618 def generations(node):
618 sg, s = None, {}
619 sg, s = None, {}
619 for g,n in ancestors(node):
620 for g,n in ancestors(node):
620 if g != sg:
621 if g != sg:
621 if sg:
622 if sg:
622 yield sg, s
623 yield sg, s
623 sg, s = g, {n:1}
624 sg, s = g, {n:1}
624 else:
625 else:
625 s[n] = 1
626 s[n] = 1
626 yield sg, s
627 yield sg, s
627
628
628 x = generations(a)
629 x = generations(a)
629 y = generations(b)
630 y = generations(b)
630 gx = x.next()
631 gx = x.next()
631 gy = y.next()
632 gy = y.next()
632
633
633 # increment each ancestor list until it is closer to root than
634 # increment each ancestor list until it is closer to root than
634 # the other, or they match
635 # the other, or they match
635 while 1:
636 while 1:
636 #print "ancestor gen %s %s" % (gx[0], gy[0])
637 #print "ancestor gen %s %s" % (gx[0], gy[0])
637 if gx[0] == gy[0]:
638 if gx[0] == gy[0]:
638 # find the intersection
639 # find the intersection
639 i = [ n for n in gx[1] if n in gy[1] ]
640 i = [ n for n in gx[1] if n in gy[1] ]
640 if i:
641 if i:
641 return i[0]
642 return i[0]
642 else:
643 else:
643 #print "next"
644 #print "next"
644 gy = y.next()
645 gy = y.next()
645 gx = x.next()
646 gx = x.next()
646 elif gx[0] < gy[0]:
647 elif gx[0] < gy[0]:
647 #print "next y"
648 #print "next y"
648 gy = y.next()
649 gy = y.next()
649 else:
650 else:
650 #print "next x"
651 #print "next x"
651 gx = x.next()
652 gx = x.next()
652
653
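The first phase of ancestor() assigns every node a generation in a single topological pass: its distance from the root is one more than the larger of its parents' distances. A toy run, modelling nullid as None:

    parents = {"a": (None, None), "b": ("a", None), "c": ("a", "b")}
    dist = {None: 0}
    for n in "abc":                  # topological order
        p1, p2 = parents[n]
        dist[n] = max(dist[p1], dist[p2]) + 1
    assert dist["c"] == 3            # root -> a -> b -> c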
653 def group(self, nodelist, lookup, infocollect = None):
654 def group(self, nodelist, lookup, infocollect=None):
654 """calculate a delta group
655 """calculate a delta group
655
656
656 Given a list of changeset revs, return a set of deltas and
657 Given a list of changeset revs, return a set of deltas and
657 metadata corresponding to nodes. The first delta is
658 metadata corresponding to nodes. The first delta is
658 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
659 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
659 have this parent, as it has all history before these
660 have this parent, as it has all history before these
660 changesets. The parent used is parent[0].
661 changesets. The parent used is parent[0].
661 """
662 """
662 revs = [self.rev(n) for n in nodelist]
663 revs = [self.rev(n) for n in nodelist]
663 needed = dict.fromkeys(revs, 1)
664
664
665 # if we don't have any revisions touched by these changesets, bail
665 # if we don't have any revisions touched by these changesets, bail
666 if not revs:
666 if not revs:
667 yield struct.pack(">l", 0)
667 yield struct.pack(">l", 0)
668 return
668 return
669
669
670 # add the parent of the first rev
670 # add the parent of the first rev
671 p = self.parents(self.node(revs[0]))[0]
671 p = self.parents(self.node(revs[0]))[0]
672 revs.insert(0, self.rev(p))
672 revs.insert(0, self.rev(p))
673
673
674 # for each delta that isn't contiguous in the log, we need to
674 if self.local:
675 # reconstruct the base, reconstruct the result, and then
675 mm = self.opener(self.datafile)
676 # calculate the delta. We also need to do this where we've
676 def chunk(r):
677 # stored a full version and not a delta
677 o = self.start(r)
678 for i in xrange(0, len(revs) - 1):
678 l = self.length(r)
679 a, b = revs[i], revs[i + 1]
679 mm.seek(o)
680 if a + 1 != b or self.base(b) == b:
680 return decompress(mm.read(l))
681 for j in xrange(self.base(a), a + 1):
681 else:
682 needed[j] = 1
682 # XXX: statichttp workaround
683 for j in xrange(self.base(b), b + 1):
683 needed = dict.fromkeys(revs[1:], 1)
684 needed[j] = 1
684 # for each delta that isn't contiguous in the log, we need to
685 # reconstruct the base, reconstruct the result, and then
686 # calculate the delta. We also need to do this where we've
687 # stored a full version and not a delta
688 for i in xrange(0, len(revs) - 1):
689 a, b = revs[i], revs[i + 1]
690 if a + 1 != b or self.base(b) == b:
691 for j in xrange(self.base(a), a + 1):
692 needed[j] = 1
693 for j in xrange(self.base(b), b + 1):
694 needed[j] = 1
685
695
686 # calculate spans to retrieve from datafile
696 # calculate spans to retrieve from datafile
687 needed = needed.keys()
697 needed = needed.keys()
688 needed.sort()
698 needed.sort()
689 spans = []
699 spans = []
690 oo = -1
700 oo = -1
691 ol = 0
701 ol = 0
692 for n in needed:
702 for n in needed:
693 if n < 0: continue
703 if n < 0: continue
694 o = self.start(n)
704 o = self.start(n)
695 l = self.length(n)
705 l = self.length(n)
696 if oo + ol == o: # can we merge with the previous?
706 if oo + ol == o: # can we merge with the previous?
697 nl = spans[-1][2]
707 nl = spans[-1][2]
698 nl.append((n, l))
708 nl.append((n, l))
699 ol += l
709 ol += l
700 spans[-1] = (oo, ol, nl)
710 spans[-1] = (oo, ol, nl)
701 else:
711 else:
702 oo = o
712 oo = o
703 ol = l
713 ol = l
704 spans.append((oo, ol, [(n, l)]))
714 spans.append((oo, ol, [(n, l)]))
705
715
706 # read spans in, divide up chunks
716 # read spans in, divide up chunks
707 chunks = {}
717 chunks = {}
708 for span in spans:
718 for span in spans:
709 # we reopen the file for each span to make http happy for now
719 # we reopen the file for each span to make http happy for now
710 f = self.opener(self.datafile)
720 f = self.opener(self.datafile)
711 f.seek(span[0])
721 f.seek(span[0])
712 data = f.read(span[1])
722 data = f.read(span[1])
713
723
714 # divide up the span
724 # divide up the span
715 pos = 0
725 pos = 0
716 for r, l in span[2]:
726 for r, l in span[2]:
717 chunks[r] = decompress(data[pos: pos + l])
727 chunks[r] = decompress(data[pos: pos + l])
718 pos += l
728 pos += l
729 def chunk(r):
730 return chunks[r]
719
731
720 # helper to reconstruct intermediate versions
732 # helper to reconstruct intermediate versions
721 def construct(text, base, rev):
733 def construct(text, base, rev):
722 bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
734 bins = [chunk(r) for r in xrange(base + 1, rev + 1)]
723 return mdiff.patches(text, bins)
735 return mdiff.patches(text, bins)
724
736
725 # build deltas
737 # build deltas
726 deltas = []
727 for d in xrange(0, len(revs) - 1):
738 for d in xrange(0, len(revs) - 1):
728 a, b = revs[d], revs[d + 1]
739 a, b = revs[d], revs[d + 1]
729 n = self.node(b)
740 n = self.node(b)
730
741
731 if infocollect is not None:
742 if infocollect is not None:
732 infocollect(n)
743 infocollect(n)
733
744
734 # do we need to construct a new delta?
745 # do we need to construct a new delta?
735 if a + 1 != b or self.base(b) == b:
746 if a + 1 != b or self.base(b) == b:
736 if a >= 0:
747 if a >= 0:
737 base = self.base(a)
748 base = self.base(a)
738 ta = chunks[self.base(a)]
749 ta = chunk(self.base(a))
739 ta = construct(ta, base, a)
750 ta = construct(ta, base, a)
740 else:
751 else:
741 ta = ""
752 ta = ""
742
753
743 base = self.base(b)
754 base = self.base(b)
744 if a > base:
755 if a > base:
745 base = a
756 base = a
746 tb = ta
757 tb = ta
747 else:
758 else:
748 tb = chunks[self.base(b)]
759 tb = chunk(self.base(b))
749 tb = construct(tb, base, b)
760 tb = construct(tb, base, b)
750 d = self.diff(ta, tb)
761 d = self.diff(ta, tb)
751 else:
762 else:
752 d = chunks[b]
763 d = chunk(b)
753
764
754 p = self.parents(n)
765 p = self.parents(n)
755 meta = n + p[0] + p[1] + lookup(n)
766 meta = n + p[0] + p[1] + lookup(n)
756 l = struct.pack(">l", len(meta) + len(d) + 4)
767 l = struct.pack(">l", len(meta) + len(d) + 4)
757 yield l
768 yield l
758 yield meta
769 yield meta
759 yield d
770 yield d
760
771
761 yield struct.pack(">l", 0)
772 yield struct.pack(">l", 0)
762
773
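On the wire, each record emitted by group() is [4-byte big-endian length][20-byte node][20-byte p1][20-byte p2][20-byte cs][delta], where the length counts itself, and a bare zero length ends the stream. A hedged sketch of a matching reader (read is any file-like read function; this is not the code addgroup actually uses):

    import struct

    def readgroup(read):
        while 1:
            l = struct.unpack(">l", read(4))[0]
            if l <= 4:
                break                 # the zero record ends the group
            data = read(l - 4)
            node, p1, p2, cs = struct.unpack("20s20s20s20s", data[:80])
            yield node, p1, p2, cs, data[80:]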
     def addgroup(self, revs, linkmapper, transaction, unique=0):
         """
         add a delta group
 
         given a set of deltas, add them to the revision log. the
         first delta is against its parent, which should be in our
         log, the rest are against the previous delta.
         """
 
         #track the base of the current delta log
         r = self.count()
         t = r - 1
         node = nullid
 
         base = prev = -1
         start = end = measure = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()
 
         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")
 
         # loop through our set of deltas
         chain = None
         for chunk in revs:
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             link = linkmapper(cs)
             if node in self.nodemap:
                 # this can happen if two branches make the same change
                 # if unique:
                 # raise RevlogError(_("already have %s") % hex(node[:4]))
                 chain = node
                 continue
             delta = chunk[80:]
 
             for p in (p1, p2):
                 if not p in self.nodemap:
                     raise RevlogError(_("unknown parent %s") % short(p1))
 
             if not chain:
                 # retrieve the parent revision of the delta chain
                 chain = p1
                 if not chain in self.nodemap:
                     raise RevlogError(_("unknown base %s") % short(chain[:4]))
 
 
             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.
 
             if chain == prev:
                 tempd = compress(delta)
                 cdelta = tempd[0] + tempd[1]
 
             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
                 text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise RevlogError(_("consistency error adding group"))
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))
 
             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)
 
         dfh.close()
         ifh.close()
         return node
 
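Two details of the loop above are worth spelling out. First, the parent check reports short(p1) even when p2 is the parent that is missing, so the error message can name the wrong node. Second, the full-version heuristic keeps delta chains cheap: a delta is appended only if it extends the current tip's chain (chain == prev) and the chain's on-disk extent plus the new compressed delta stays within twice the size of the last full text; otherwise the delta is applied and a full revision is stored via addrevision. The decision, distilled into a sketch with hypothetical names:

    def should_store_full(chain, prev, chain_bytes, cdelta_len, measure):
        # chain_bytes: on-disk extent of the current chain (end - start)
        # measure: size of the last full text in the chain
        return chain != prev or (chain_bytes + cdelta_len) > measure * 2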
     def strip(self, rev, minlink):
         if self.count() == 0 or rev >= self.count():
             return
 
         # When stripping away a revision, we need to make sure it
         # does not actually belong to an older changeset.
         # The minlink parameter defines the oldest revision
         # we're allowed to strip away.
         while minlink > self.index[rev][3]:
             rev += 1
             if rev >= self.count():
                 return
 
         # first truncate the files on disk
         end = self.start(rev)
         self.opener(self.datafile, "a").truncate(end)
         end = rev * struct.calcsize(indexformat)
         self.opener(self.indexfile, "a").truncate(end)
 
         # then reset internal state in memory to forget those revisions
         self.cache = None
         for p in self.index[rev:]:
             del self.nodemap[p[6]]
         del self.index[rev:]
 
         # truncating the lazyindex also truncates the lazymap.
         if isinstance(self.index, lazyindex):
             self.index.trunc(end)
 
 
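strip first advances rev past any entries whose link revision (index field 3) predates minlink, then truncates both files: the data file at the first stripped revision's start offset, and the index at rev fixed-size entries. Consistent with the seven-field tuples addgroup builds (offset, size, base, link, p1, p2, node), an index entry is four 32-bit ints plus three 20-byte nodes; the format string below reflects that layout but is written here as an assumption, not quoted from the source:

    import struct

    indexformat = ">4l20s20s20s"    # assumed: 4 ints + 3 nodes

    def strip_offsets(log, rev):
        # Truncation points used by strip(), for a revlog-like `log`.
        data_end = log.start(rev)                       # data file
        index_end = rev * struct.calcsize(indexformat)  # index file
        return data_end, index_end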
     def checksize(self):
         expected = 0
         if self.count():
             expected = self.end(self.count() - 1)
         try:
             f = self.opener(self.datafile)
             f.seek(0, 2)
             actual = f.tell()
             return expected - actual
         except IOError, inst:
             if inst.errno == errno.ENOENT:
                 return 0
             raise
 
 
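checksize compares where the index says the data file should end with where it actually ends, returning expected - actual: positive means the data file is truncated, negative means trailing garbage, zero means consistent (a missing data file is reported as 0). A hedged usage sketch, with dlog standing in for any revlog instance:

    def report_size(dlog, name):
        d = dlog.checksize()        # expected end - actual end
        if d > 0:
            print("%s: data file is %d bytes short" % (name, d))
        elif d < 0:
            print("%s: data file has %d trailing bytes" % (name, -d))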
@@ -1,43 +1,45 @@
 # statichttprepo.py - simple http repository class for mercurial
 #
 # This provides read-only repo access to repositories exported via static http
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from demandload import demandload
 demandload(globals(), "changelog filelog httprangereader")
 demandload(globals(), "localrepo manifest os urllib urllib2")
 
 class rangereader(httprangereader.httprangereader):
     def read(self, size=None):
         try:
             return httprangereader.httprangereader.read(self, size)
         except urllib2.URLError, inst:
             raise IOError(None, str(inst))
 
 def opener(base):
     """return a function that opens files over http"""
     p = base
     def o(path, mode="r"):
         f = os.path.join(p, urllib.quote(path))
         return rangereader(f)
     return o
 
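opener returns a closure, which is what lets the repository classes stay transport-agnostic: the same self.opener(filename) call that yields a plain file object in a local repository yields a rangereader here, fetching byte ranges over HTTP. Hypothetical usage (the URL is invented):

    # Sketch: open a revlog index over static HTTP.
    o = opener("http://example.com/repo/.hg")
    f = o("00changelog.i")    # a rangereader, not a local file
    data = f.read(64)         # served via an HTTP Range request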
 class statichttprepository(localrepo.localrepository):
     def __init__(self, ui, path):
         self.path = (path + "/.hg")
         self.ui = ui
         self.opener = opener(self.path)
-        self.manifest = manifest.manifest(self.opener)
-        self.changelog = changelog.changelog(self.opener)
+        self.manifest = manifest.manifest(self.opener, local=self.local())
+        self.changelog = changelog.changelog(self.opener, local=self.local())
         self.tagscache = None
         self.nodetagscache = None
+        self.encodepats = None
+        self.decodepats = None
 
     def dev(self):
         return -1
 
     def local(self):
         return False
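These constructor calls are the point of the changeset: manifest and changelog now forward a local flag down to revlog, and since a static-http repository answers local() with False, revlog can tell whether its files live on the local filesystem or behind a static HTTP server, which is presumably what lets revlog.group choose between direct per-chunk reads and the span batching seen earlier. A toy illustration of the same flag plumbing; the class and method names are invented:

    class toyrevlog(object):
        def __init__(self, opener, indexfile, datafile, local=True):
            self.opener = opener
            self.indexfile = indexfile
            self.datafile = datafile
            self.local = local    # filesystem vs. static HTTP

        def read_strategy(self):
            # a local revlog can afford one read per chunk; a remote
            # one batches adjacent chunks into spans to cut round trips
            return "per-chunk" if self.local else "spans"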