drop superfluous param from revlog.addgroup()
Peter Arrenbrecht
r6647:602f7c1a default
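
The change itself is a small API cleanup: the `unique` argument of `revlog.addgroup()` no longer had any effect, so it is dropped from the signature, from overriding implementations, and from call sites. In the hunks reproduced below, the only changed line is the `NotImplementedError` stub in `bundlerevlog`; the excerpt of the localrepo.py hunk shown here contains only unchanged context. A minimal before/after sketch in Python; the call site in the comment is illustrative, not a line quoted from this commit:

    # Before: every addgroup() implementation carried an unused
    # 'unique' flag (defaulting to 0) that no caller relied on.
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        ...

    # After: the superfluous parameter is gone, and callers pass
    # only the revisions, link mapper, and transaction, e.g.
    # (hypothetical call) repo.changelog.addgroup(chunks, csmap, tr)
    def addgroup(self, revs, linkmapper, transaction):
        ...
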
mercurial/bundlerepo.py
@@ -1,294 +1,294 @@
1 """
1 """
2 bundlerepo.py - repository class for viewing uncompressed bundles
2 bundlerepo.py - repository class for viewing uncompressed bundles
3
3
4 This provides a read-only repository interface to bundles as if
4 This provides a read-only repository interface to bundles as if
5 they were part of the actual repository.
5 they were part of the actual repository.
6
6
7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 from i18n import _
14 from i18n import _
15 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
15 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
16 import repo, localrepo, changelog, manifest, filelog, revlog
16 import repo, localrepo, changelog, manifest, filelog, revlog
17
17
18 class bundlerevlog(revlog.revlog):
18 class bundlerevlog(revlog.revlog):
19 def __init__(self, opener, indexfile, bundlefile,
19 def __init__(self, opener, indexfile, bundlefile,
20 linkmapper=None):
20 linkmapper=None):
21 # How it works:
21 # How it works:
22 # to retrieve a revision, we need to know the offset of
22 # to retrieve a revision, we need to know the offset of
23 # the revision in the bundlefile (an opened file).
23 # the revision in the bundlefile (an opened file).
24 #
24 #
25 # We store this offset in the index (start), to differentiate a
25 # We store this offset in the index (start), to differentiate a
26 # rev in the bundle and from a rev in the revlog, we check
26 # rev in the bundle and from a rev in the revlog, we check
27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 # (it is bigger since we store the node to which the delta is)
28 # (it is bigger since we store the node to which the delta is)
29 #
29 #
30 revlog.revlog.__init__(self, opener, indexfile)
30 revlog.revlog.__init__(self, opener, indexfile)
31 self.bundlefile = bundlefile
31 self.bundlefile = bundlefile
32 self.basemap = {}
32 self.basemap = {}
33 def chunkpositer():
33 def chunkpositer():
34 for chunk in changegroup.chunkiter(bundlefile):
34 for chunk in changegroup.chunkiter(bundlefile):
35 pos = bundlefile.tell()
35 pos = bundlefile.tell()
36 yield chunk, pos - len(chunk)
36 yield chunk, pos - len(chunk)
37 n = self.count()
37 n = self.count()
38 prev = None
38 prev = None
39 for chunk, start in chunkpositer():
39 for chunk, start in chunkpositer():
40 size = len(chunk)
40 size = len(chunk)
41 if size < 80:
41 if size < 80:
42 raise util.Abort("invalid changegroup")
42 raise util.Abort("invalid changegroup")
43 start += 80
43 start += 80
44 size -= 80
44 size -= 80
45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 if node in self.nodemap:
46 if node in self.nodemap:
47 prev = node
47 prev = node
48 continue
48 continue
49 for p in (p1, p2):
49 for p in (p1, p2):
50 if not p in self.nodemap:
50 if not p in self.nodemap:
51 raise revlog.LookupError(p1, self.indexfile,
51 raise revlog.LookupError(p1, self.indexfile,
52 _("unknown parent"))
52 _("unknown parent"))
53 if linkmapper is None:
53 if linkmapper is None:
54 link = n
54 link = n
55 else:
55 else:
56 link = linkmapper(cs)
56 link = linkmapper(cs)
57
57
58 if not prev:
58 if not prev:
59 prev = p1
59 prev = p1
60 # start, size, full unc. size, base (unused), link, p1, p2, node
60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 self.rev(p1), self.rev(p2), node)
62 self.rev(p1), self.rev(p2), node)
63 self.basemap[n] = prev
63 self.basemap[n] = prev
64 self.index.insert(-1, e)
64 self.index.insert(-1, e)
65 self.nodemap[node] = n
65 self.nodemap[node] = n
66 prev = node
66 prev = node
67 n += 1
67 n += 1
68
68
69 def bundle(self, rev):
69 def bundle(self, rev):
70 """is rev from the bundle"""
70 """is rev from the bundle"""
71 if rev < 0:
71 if rev < 0:
72 return False
72 return False
73 return rev in self.basemap
73 return rev in self.basemap
74 def bundlebase(self, rev): return self.basemap[rev]
74 def bundlebase(self, rev): return self.basemap[rev]
75 def chunk(self, rev, df=None, cachelen=4096):
75 def chunk(self, rev, df=None, cachelen=4096):
76 # Warning: in case of bundle, the diff is against bundlebase,
76 # Warning: in case of bundle, the diff is against bundlebase,
77 # not against rev - 1
77 # not against rev - 1
78 # XXX: could use some caching
78 # XXX: could use some caching
79 if not self.bundle(rev):
79 if not self.bundle(rev):
80 return revlog.revlog.chunk(self, rev, df)
80 return revlog.revlog.chunk(self, rev, df)
81 self.bundlefile.seek(self.start(rev))
81 self.bundlefile.seek(self.start(rev))
82 return self.bundlefile.read(self.length(rev))
82 return self.bundlefile.read(self.length(rev))
83
83
84 def revdiff(self, rev1, rev2):
84 def revdiff(self, rev1, rev2):
85 """return or calculate a delta between two revisions"""
85 """return or calculate a delta between two revisions"""
86 if self.bundle(rev1) and self.bundle(rev2):
86 if self.bundle(rev1) and self.bundle(rev2):
87 # hot path for bundle
87 # hot path for bundle
88 revb = self.rev(self.bundlebase(rev2))
88 revb = self.rev(self.bundlebase(rev2))
89 if revb == rev1:
89 if revb == rev1:
90 return self.chunk(rev2)
90 return self.chunk(rev2)
91 elif not self.bundle(rev1) and not self.bundle(rev2):
91 elif not self.bundle(rev1) and not self.bundle(rev2):
92 return revlog.revlog.revdiff(self, rev1, rev2)
92 return revlog.revlog.revdiff(self, rev1, rev2)
93
93
94 return mdiff.textdiff(self.revision(self.node(rev1)),
94 return mdiff.textdiff(self.revision(self.node(rev1)),
95 self.revision(self.node(rev2)))
95 self.revision(self.node(rev2)))
96
96
97 def revision(self, node):
97 def revision(self, node):
98 """return an uncompressed revision of a given"""
98 """return an uncompressed revision of a given"""
99 if node == nullid: return ""
99 if node == nullid: return ""
100
100
101 text = None
101 text = None
102 chain = []
102 chain = []
103 iter_node = node
103 iter_node = node
104 rev = self.rev(iter_node)
104 rev = self.rev(iter_node)
105 # reconstruct the revision if it is from a changegroup
105 # reconstruct the revision if it is from a changegroup
106 while self.bundle(rev):
106 while self.bundle(rev):
107 if self._cache and self._cache[0] == iter_node:
107 if self._cache and self._cache[0] == iter_node:
108 text = self._cache[2]
108 text = self._cache[2]
109 break
109 break
110 chain.append(rev)
110 chain.append(rev)
111 iter_node = self.bundlebase(rev)
111 iter_node = self.bundlebase(rev)
112 rev = self.rev(iter_node)
112 rev = self.rev(iter_node)
113 if text is None:
113 if text is None:
114 text = revlog.revlog.revision(self, iter_node)
114 text = revlog.revlog.revision(self, iter_node)
115
115
116 while chain:
116 while chain:
117 delta = self.chunk(chain.pop())
117 delta = self.chunk(chain.pop())
118 text = mdiff.patches(text, [delta])
118 text = mdiff.patches(text, [delta])
119
119
120 p1, p2 = self.parents(node)
120 p1, p2 = self.parents(node)
121 if node != revlog.hash(text, p1, p2):
121 if node != revlog.hash(text, p1, p2):
122 raise revlog.RevlogError(_("integrity check failed on %s:%d")
122 raise revlog.RevlogError(_("integrity check failed on %s:%d")
123 % (self.datafile, self.rev(node)))
123 % (self.datafile, self.rev(node)))
124
124
125 self._cache = (node, self.rev(node), text)
125 self._cache = (node, self.rev(node), text)
126 return text
126 return text
127
127
128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
129 raise NotImplementedError
129 raise NotImplementedError
130 def addgroup(self, revs, linkmapper, transaction, unique=0):
130 def addgroup(self, revs, linkmapper, transaction):
131 raise NotImplementedError
131 raise NotImplementedError
132 def strip(self, rev, minlink):
132 def strip(self, rev, minlink):
133 raise NotImplementedError
133 raise NotImplementedError
134 def checksize(self):
134 def checksize(self):
135 raise NotImplementedError
135 raise NotImplementedError
136
136
137 class bundlechangelog(bundlerevlog, changelog.changelog):
137 class bundlechangelog(bundlerevlog, changelog.changelog):
138 def __init__(self, opener, bundlefile):
138 def __init__(self, opener, bundlefile):
139 changelog.changelog.__init__(self, opener)
139 changelog.changelog.__init__(self, opener)
140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
141
141
142 class bundlemanifest(bundlerevlog, manifest.manifest):
142 class bundlemanifest(bundlerevlog, manifest.manifest):
143 def __init__(self, opener, bundlefile, linkmapper):
143 def __init__(self, opener, bundlefile, linkmapper):
144 manifest.manifest.__init__(self, opener)
144 manifest.manifest.__init__(self, opener)
145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
146 linkmapper)
146 linkmapper)
147
147
148 class bundlefilelog(bundlerevlog, filelog.filelog):
148 class bundlefilelog(bundlerevlog, filelog.filelog):
149 def __init__(self, opener, path, bundlefile, linkmapper):
149 def __init__(self, opener, path, bundlefile, linkmapper):
150 filelog.filelog.__init__(self, opener, path)
150 filelog.filelog.__init__(self, opener, path)
151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
152 linkmapper)
152 linkmapper)
153
153
154 class bundlerepository(localrepo.localrepository):
154 class bundlerepository(localrepo.localrepository):
155 def __init__(self, ui, path, bundlename):
155 def __init__(self, ui, path, bundlename):
156 self._tempparent = None
156 self._tempparent = None
157 try:
157 try:
158 localrepo.localrepository.__init__(self, ui, path)
158 localrepo.localrepository.__init__(self, ui, path)
159 except repo.RepoError:
159 except repo.RepoError:
160 self._tempparent = tempfile.mkdtemp()
160 self._tempparent = tempfile.mkdtemp()
161 tmprepo = localrepo.instance(ui,self._tempparent,1)
161 tmprepo = localrepo.instance(ui,self._tempparent,1)
162 localrepo.localrepository.__init__(self, ui, self._tempparent)
162 localrepo.localrepository.__init__(self, ui, self._tempparent)
163
163
164 if path:
164 if path:
165 self._url = 'bundle:' + path + '+' + bundlename
165 self._url = 'bundle:' + path + '+' + bundlename
166 else:
166 else:
167 self._url = 'bundle:' + bundlename
167 self._url = 'bundle:' + bundlename
168
168
169 self.tempfile = None
169 self.tempfile = None
170 self.bundlefile = open(bundlename, "rb")
170 self.bundlefile = open(bundlename, "rb")
171 header = self.bundlefile.read(6)
171 header = self.bundlefile.read(6)
172 if not header.startswith("HG"):
172 if not header.startswith("HG"):
173 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
173 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
174 elif not header.startswith("HG10"):
174 elif not header.startswith("HG10"):
175 raise util.Abort(_("%s: unknown bundle version") % bundlename)
175 raise util.Abort(_("%s: unknown bundle version") % bundlename)
176 elif (header == "HG10BZ") or (header == "HG10GZ"):
176 elif (header == "HG10BZ") or (header == "HG10GZ"):
177 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
177 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
178 suffix=".hg10un", dir=self.path)
178 suffix=".hg10un", dir=self.path)
179 self.tempfile = temp
179 self.tempfile = temp
180 fptemp = os.fdopen(fdtemp, 'wb')
180 fptemp = os.fdopen(fdtemp, 'wb')
181 def generator(f):
181 def generator(f):
182 if header == "HG10BZ":
182 if header == "HG10BZ":
183 zd = bz2.BZ2Decompressor()
183 zd = bz2.BZ2Decompressor()
184 zd.decompress("BZ")
184 zd.decompress("BZ")
185 elif header == "HG10GZ":
185 elif header == "HG10GZ":
186 zd = zlib.decompressobj()
186 zd = zlib.decompressobj()
187 for chunk in f:
187 for chunk in f:
188 yield zd.decompress(chunk)
188 yield zd.decompress(chunk)
189 gen = generator(util.filechunkiter(self.bundlefile, 4096))
189 gen = generator(util.filechunkiter(self.bundlefile, 4096))
190
190
191 try:
191 try:
192 fptemp.write("HG10UN")
192 fptemp.write("HG10UN")
193 for chunk in gen:
193 for chunk in gen:
194 fptemp.write(chunk)
194 fptemp.write(chunk)
195 finally:
195 finally:
196 fptemp.close()
196 fptemp.close()
197 self.bundlefile.close()
197 self.bundlefile.close()
198
198
199 self.bundlefile = open(self.tempfile, "rb")
199 self.bundlefile = open(self.tempfile, "rb")
200 # seek right after the header
200 # seek right after the header
201 self.bundlefile.seek(6)
201 self.bundlefile.seek(6)
202 elif header == "HG10UN":
202 elif header == "HG10UN":
203 # nothing to do
203 # nothing to do
204 pass
204 pass
205 else:
205 else:
206 raise util.Abort(_("%s: unknown bundle compression type")
206 raise util.Abort(_("%s: unknown bundle compression type")
207 % bundlename)
207 % bundlename)
208 # dict with the mapping 'filename' -> position in the bundle
208 # dict with the mapping 'filename' -> position in the bundle
209 self.bundlefilespos = {}
209 self.bundlefilespos = {}
210
210
211 def __getattr__(self, name):
211 def __getattr__(self, name):
212 if name == 'changelog':
212 if name == 'changelog':
213 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
213 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
214 self.manstart = self.bundlefile.tell()
214 self.manstart = self.bundlefile.tell()
215 return self.changelog
215 return self.changelog
216 if name == 'manifest':
216 if name == 'manifest':
217 self.bundlefile.seek(self.manstart)
217 self.bundlefile.seek(self.manstart)
218 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
218 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
219 self.changelog.rev)
219 self.changelog.rev)
220 self.filestart = self.bundlefile.tell()
220 self.filestart = self.bundlefile.tell()
221 return self.manifest
221 return self.manifest
222 if name == 'manstart':
222 if name == 'manstart':
223 self.changelog
223 self.changelog
224 return self.manstart
224 return self.manstart
225 if name == 'filestart':
225 if name == 'filestart':
226 self.manifest
226 self.manifest
227 return self.filestart
227 return self.filestart
228 return localrepo.localrepository.__getattr__(self, name)
228 return localrepo.localrepository.__getattr__(self, name)
229
229
230 def url(self):
230 def url(self):
231 return self._url
231 return self._url
232
232
233 def file(self, f):
233 def file(self, f):
234 if not self.bundlefilespos:
234 if not self.bundlefilespos:
235 self.bundlefile.seek(self.filestart)
235 self.bundlefile.seek(self.filestart)
236 while 1:
236 while 1:
237 chunk = changegroup.getchunk(self.bundlefile)
237 chunk = changegroup.getchunk(self.bundlefile)
238 if not chunk:
238 if not chunk:
239 break
239 break
240 self.bundlefilespos[chunk] = self.bundlefile.tell()
240 self.bundlefilespos[chunk] = self.bundlefile.tell()
241 for c in changegroup.chunkiter(self.bundlefile):
241 for c in changegroup.chunkiter(self.bundlefile):
242 pass
242 pass
243
243
244 if f[0] == '/':
244 if f[0] == '/':
245 f = f[1:]
245 f = f[1:]
246 if f in self.bundlefilespos:
246 if f in self.bundlefilespos:
247 self.bundlefile.seek(self.bundlefilespos[f])
247 self.bundlefile.seek(self.bundlefilespos[f])
248 return bundlefilelog(self.sopener, f, self.bundlefile,
248 return bundlefilelog(self.sopener, f, self.bundlefile,
249 self.changelog.rev)
249 self.changelog.rev)
250 else:
250 else:
251 return filelog.filelog(self.sopener, f)
251 return filelog.filelog(self.sopener, f)
252
252
253 def close(self):
253 def close(self):
254 """Close assigned bundle file immediately."""
254 """Close assigned bundle file immediately."""
255 self.bundlefile.close()
255 self.bundlefile.close()
256
256
257 def __del__(self):
257 def __del__(self):
258 bundlefile = getattr(self, 'bundlefile', None)
258 bundlefile = getattr(self, 'bundlefile', None)
259 if bundlefile and not bundlefile.closed:
259 if bundlefile and not bundlefile.closed:
260 bundlefile.close()
260 bundlefile.close()
261 tempfile = getattr(self, 'tempfile', None)
261 tempfile = getattr(self, 'tempfile', None)
262 if tempfile is not None:
262 if tempfile is not None:
263 os.unlink(tempfile)
263 os.unlink(tempfile)
264 if self._tempparent:
264 if self._tempparent:
265 shutil.rmtree(self._tempparent, True)
265 shutil.rmtree(self._tempparent, True)
266
266
267 def cancopy(self):
267 def cancopy(self):
268 return False
268 return False
269
269
270 def instance(ui, path, create):
270 def instance(ui, path, create):
271 if create:
271 if create:
272 raise util.Abort(_('cannot create new bundle repository'))
272 raise util.Abort(_('cannot create new bundle repository'))
273 parentpath = ui.config("bundle", "mainreporoot", "")
273 parentpath = ui.config("bundle", "mainreporoot", "")
274 if parentpath:
274 if parentpath:
275 # Try to make the full path relative so we get a nice, short URL.
275 # Try to make the full path relative so we get a nice, short URL.
276 # In particular, we don't want temp dir names in test outputs.
276 # In particular, we don't want temp dir names in test outputs.
277 cwd = os.getcwd()
277 cwd = os.getcwd()
278 if parentpath == cwd:
278 if parentpath == cwd:
279 parentpath = ''
279 parentpath = ''
280 else:
280 else:
281 cwd = os.path.join(cwd,'')
281 cwd = os.path.join(cwd,'')
282 if parentpath.startswith(cwd):
282 if parentpath.startswith(cwd):
283 parentpath = parentpath[len(cwd):]
283 parentpath = parentpath[len(cwd):]
284 path = util.drop_scheme('file', path)
284 path = util.drop_scheme('file', path)
285 if path.startswith('bundle:'):
285 if path.startswith('bundle:'):
286 path = util.drop_scheme('bundle', path)
286 path = util.drop_scheme('bundle', path)
287 s = path.split("+", 1)
287 s = path.split("+", 1)
288 if len(s) == 1:
288 if len(s) == 1:
289 repopath, bundlename = parentpath, s[0]
289 repopath, bundlename = parentpath, s[0]
290 else:
290 else:
291 repopath, bundlename = s
291 repopath, bundlename = s
292 else:
292 else:
293 repopath, bundlename = parentpath, path
293 repopath, bundlename = parentpath, path
294 return bundlerepository(ui, repopath, bundlename)
294 return bundlerepository(ui, repopath, bundlename)
mercurial/localrepo.py
@@ -1,2134 +1,2134 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = util.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def url(self):
120 def url(self):
121 return 'file:' + self.root
121 return 'file:' + self.root
122
122
123 def hook(self, name, throw=False, **args):
123 def hook(self, name, throw=False, **args):
124 return hook.hook(self.ui, self, name, throw, **args)
124 return hook.hook(self.ui, self, name, throw, **args)
125
125
126 tag_disallowed = ':\r\n'
126 tag_disallowed = ':\r\n'
127
127
128 def _tag(self, names, node, message, local, user, date, parent=None,
128 def _tag(self, names, node, message, local, user, date, parent=None,
129 extra={}):
129 extra={}):
130 use_dirstate = parent is None
130 use_dirstate = parent is None
131
131
132 if isinstance(names, str):
132 if isinstance(names, str):
133 allchars = names
133 allchars = names
134 names = (names,)
134 names = (names,)
135 else:
135 else:
136 allchars = ''.join(names)
136 allchars = ''.join(names)
137 for c in self.tag_disallowed:
137 for c in self.tag_disallowed:
138 if c in allchars:
138 if c in allchars:
139 raise util.Abort(_('%r cannot be used in a tag name') % c)
139 raise util.Abort(_('%r cannot be used in a tag name') % c)
140
140
141 for name in names:
141 for name in names:
142 self.hook('pretag', throw=True, node=hex(node), tag=name,
142 self.hook('pretag', throw=True, node=hex(node), tag=name,
143 local=local)
143 local=local)
144
144
145 def writetags(fp, names, munge, prevtags):
145 def writetags(fp, names, munge, prevtags):
146 fp.seek(0, 2)
146 fp.seek(0, 2)
147 if prevtags and prevtags[-1] != '\n':
147 if prevtags and prevtags[-1] != '\n':
148 fp.write('\n')
148 fp.write('\n')
149 for name in names:
149 for name in names:
150 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
150 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
151 fp.close()
151 fp.close()
152
152
153 prevtags = ''
153 prevtags = ''
154 if local:
154 if local:
155 try:
155 try:
156 fp = self.opener('localtags', 'r+')
156 fp = self.opener('localtags', 'r+')
157 except IOError, err:
157 except IOError, err:
158 fp = self.opener('localtags', 'a')
158 fp = self.opener('localtags', 'a')
159 else:
159 else:
160 prevtags = fp.read()
160 prevtags = fp.read()
161
161
162 # local tags are stored in the current charset
162 # local tags are stored in the current charset
163 writetags(fp, names, None, prevtags)
163 writetags(fp, names, None, prevtags)
164 for name in names:
164 for name in names:
165 self.hook('tag', node=hex(node), tag=name, local=local)
165 self.hook('tag', node=hex(node), tag=name, local=local)
166 return
166 return
167
167
168 if use_dirstate:
168 if use_dirstate:
169 try:
169 try:
170 fp = self.wfile('.hgtags', 'rb+')
170 fp = self.wfile('.hgtags', 'rb+')
171 except IOError, err:
171 except IOError, err:
172 fp = self.wfile('.hgtags', 'ab')
172 fp = self.wfile('.hgtags', 'ab')
173 else:
173 else:
174 prevtags = fp.read()
174 prevtags = fp.read()
175 else:
175 else:
176 try:
176 try:
177 prevtags = self.filectx('.hgtags', parent).data()
177 prevtags = self.filectx('.hgtags', parent).data()
178 except revlog.LookupError:
178 except revlog.LookupError:
179 pass
179 pass
180 fp = self.wfile('.hgtags', 'wb')
180 fp = self.wfile('.hgtags', 'wb')
181 if prevtags:
181 if prevtags:
182 fp.write(prevtags)
182 fp.write(prevtags)
183
183
184 # committed tags are stored in UTF-8
184 # committed tags are stored in UTF-8
185 writetags(fp, names, util.fromlocal, prevtags)
185 writetags(fp, names, util.fromlocal, prevtags)
186
186
187 if use_dirstate and '.hgtags' not in self.dirstate:
187 if use_dirstate and '.hgtags' not in self.dirstate:
188 self.add(['.hgtags'])
188 self.add(['.hgtags'])
189
189
190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 extra=extra)
191 extra=extra)
192
192
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195
195
196 return tagnode
196 return tagnode
197
197
198 def tag(self, names, node, message, local, user, date):
198 def tag(self, names, node, message, local, user, date):
199 '''tag a revision with one or more symbolic names.
199 '''tag a revision with one or more symbolic names.
200
200
201 names is a list of strings or, when adding a single tag, names may be a
201 names is a list of strings or, when adding a single tag, names may be a
202 string.
202 string.
203
203
204 if local is True, the tags are stored in a per-repository file.
204 if local is True, the tags are stored in a per-repository file.
205 otherwise, they are stored in the .hgtags file, and a new
205 otherwise, they are stored in the .hgtags file, and a new
206 changeset is committed with the change.
206 changeset is committed with the change.
207
207
208 keyword arguments:
208 keyword arguments:
209
209
210 local: whether to store tags in non-version-controlled file
210 local: whether to store tags in non-version-controlled file
211 (default False)
211 (default False)
212
212
213 message: commit message to use if committing
213 message: commit message to use if committing
214
214
215 user: name of user to use if committing
215 user: name of user to use if committing
216
216
217 date: date tuple to use if committing'''
217 date: date tuple to use if committing'''
218
218
219 for x in self.status()[:5]:
219 for x in self.status()[:5]:
220 if '.hgtags' in x:
220 if '.hgtags' in x:
221 raise util.Abort(_('working copy of .hgtags is changed '
221 raise util.Abort(_('working copy of .hgtags is changed '
222 '(please commit .hgtags manually)'))
222 '(please commit .hgtags manually)'))
223
223
224 self._tag(names, node, message, local, user, date)
224 self._tag(names, node, message, local, user, date)
225
225
226 def tags(self):
226 def tags(self):
227 '''return a mapping of tag to node'''
227 '''return a mapping of tag to node'''
228 if self.tagscache:
228 if self.tagscache:
229 return self.tagscache
229 return self.tagscache
230
230
231 globaltags = {}
231 globaltags = {}
232 tagtypes = {}
232 tagtypes = {}
233
233
234 def readtags(lines, fn, tagtype):
234 def readtags(lines, fn, tagtype):
235 filetags = {}
235 filetags = {}
236 count = 0
236 count = 0
237
237
238 def warn(msg):
238 def warn(msg):
239 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
239 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
240
240
241 for l in lines:
241 for l in lines:
242 count += 1
242 count += 1
243 if not l:
243 if not l:
244 continue
244 continue
245 s = l.split(" ", 1)
245 s = l.split(" ", 1)
246 if len(s) != 2:
246 if len(s) != 2:
247 warn(_("cannot parse entry"))
247 warn(_("cannot parse entry"))
248 continue
248 continue
249 node, key = s
249 node, key = s
250 key = util.tolocal(key.strip()) # stored in UTF-8
250 key = util.tolocal(key.strip()) # stored in UTF-8
251 try:
251 try:
252 bin_n = bin(node)
252 bin_n = bin(node)
253 except TypeError:
253 except TypeError:
254 warn(_("node '%s' is not well formed") % node)
254 warn(_("node '%s' is not well formed") % node)
255 continue
255 continue
256 if bin_n not in self.changelog.nodemap:
256 if bin_n not in self.changelog.nodemap:
257 warn(_("tag '%s' refers to unknown node") % key)
257 warn(_("tag '%s' refers to unknown node") % key)
258 continue
258 continue
259
259
260 h = []
260 h = []
261 if key in filetags:
261 if key in filetags:
262 n, h = filetags[key]
262 n, h = filetags[key]
263 h.append(n)
263 h.append(n)
264 filetags[key] = (bin_n, h)
264 filetags[key] = (bin_n, h)
265
265
266 for k, nh in filetags.items():
266 for k, nh in filetags.items():
267 if k not in globaltags:
267 if k not in globaltags:
268 globaltags[k] = nh
268 globaltags[k] = nh
269 tagtypes[k] = tagtype
269 tagtypes[k] = tagtype
270 continue
270 continue
271
271
272 # we prefer the global tag if:
272 # we prefer the global tag if:
273 # it supercedes us OR
273 # it supercedes us OR
274 # mutual supercedes and it has a higher rank
274 # mutual supercedes and it has a higher rank
275 # otherwise we win because we're tip-most
275 # otherwise we win because we're tip-most
276 an, ah = nh
276 an, ah = nh
277 bn, bh = globaltags[k]
277 bn, bh = globaltags[k]
278 if (bn != an and an in bh and
278 if (bn != an and an in bh and
279 (bn not in ah or len(bh) > len(ah))):
279 (bn not in ah or len(bh) > len(ah))):
280 an = bn
280 an = bn
281 ah.extend([n for n in bh if n not in ah])
281 ah.extend([n for n in bh if n not in ah])
282 globaltags[k] = an, ah
282 globaltags[k] = an, ah
283 tagtypes[k] = tagtype
283 tagtypes[k] = tagtype
284
284
285 # read the tags file from each head, ending with the tip
285 # read the tags file from each head, ending with the tip
286 f = None
286 f = None
287 for rev, node, fnode in self._hgtagsnodes():
287 for rev, node, fnode in self._hgtagsnodes():
288 f = (f and f.filectx(fnode) or
288 f = (f and f.filectx(fnode) or
289 self.filectx('.hgtags', fileid=fnode))
289 self.filectx('.hgtags', fileid=fnode))
290 readtags(f.data().splitlines(), f, "global")
290 readtags(f.data().splitlines(), f, "global")
291
291
292 try:
292 try:
293 data = util.fromlocal(self.opener("localtags").read())
293 data = util.fromlocal(self.opener("localtags").read())
294 # localtags are stored in the local character set
294 # localtags are stored in the local character set
295 # while the internal tag table is stored in UTF-8
295 # while the internal tag table is stored in UTF-8
296 readtags(data.splitlines(), "localtags", "local")
296 readtags(data.splitlines(), "localtags", "local")
297 except IOError:
297 except IOError:
298 pass
298 pass
299
299
300 self.tagscache = {}
300 self.tagscache = {}
301 self._tagstypecache = {}
301 self._tagstypecache = {}
302 for k,nh in globaltags.items():
302 for k,nh in globaltags.items():
303 n = nh[0]
303 n = nh[0]
304 if n != nullid:
304 if n != nullid:
305 self.tagscache[k] = n
305 self.tagscache[k] = n
306 self._tagstypecache[k] = tagtypes[k]
306 self._tagstypecache[k] = tagtypes[k]
307 self.tagscache['tip'] = self.changelog.tip()
307 self.tagscache['tip'] = self.changelog.tip()
308
308
309 return self.tagscache
309 return self.tagscache
310
310
311 def tagtype(self, tagname):
311 def tagtype(self, tagname):
312 '''
312 '''
313 return the type of the given tag. result can be:
313 return the type of the given tag. result can be:
314
314
315 'local' : a local tag
315 'local' : a local tag
316 'global' : a global tag
316 'global' : a global tag
317 None : tag does not exist
317 None : tag does not exist
318 '''
318 '''
319
319
320 self.tags()
320 self.tags()
321
321
322 return self._tagstypecache.get(tagname)
322 return self._tagstypecache.get(tagname)
323
323
324 def _hgtagsnodes(self):
324 def _hgtagsnodes(self):
325 heads = self.heads()
325 heads = self.heads()
326 heads.reverse()
326 heads.reverse()
327 last = {}
327 last = {}
328 ret = []
328 ret = []
329 for node in heads:
329 for node in heads:
330 c = self.changectx(node)
330 c = self.changectx(node)
331 rev = c.rev()
331 rev = c.rev()
332 try:
332 try:
333 fnode = c.filenode('.hgtags')
333 fnode = c.filenode('.hgtags')
334 except revlog.LookupError:
334 except revlog.LookupError:
335 continue
335 continue
336 ret.append((rev, node, fnode))
336 ret.append((rev, node, fnode))
337 if fnode in last:
337 if fnode in last:
338 ret[last[fnode]] = None
338 ret[last[fnode]] = None
339 last[fnode] = len(ret) - 1
339 last[fnode] = len(ret) - 1
340 return [item for item in ret if item]
340 return [item for item in ret if item]
341
341
342 def tagslist(self):
342 def tagslist(self):
343 '''return a list of tags ordered by revision'''
343 '''return a list of tags ordered by revision'''
344 l = []
344 l = []
345 for t, n in self.tags().items():
345 for t, n in self.tags().items():
346 try:
346 try:
347 r = self.changelog.rev(n)
347 r = self.changelog.rev(n)
348 except:
348 except:
349 r = -2 # sort to the beginning of the list if unknown
349 r = -2 # sort to the beginning of the list if unknown
350 l.append((r, t, n))
350 l.append((r, t, n))
351 l.sort()
351 l.sort()
352 return [(t, n) for r, t, n in l]
352 return [(t, n) for r, t, n in l]
353
353
354 def nodetags(self, node):
354 def nodetags(self, node):
355 '''return the tags associated with a node'''
355 '''return the tags associated with a node'''
356 if not self.nodetagscache:
356 if not self.nodetagscache:
357 self.nodetagscache = {}
357 self.nodetagscache = {}
358 for t, n in self.tags().items():
358 for t, n in self.tags().items():
359 self.nodetagscache.setdefault(n, []).append(t)
359 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
360 return self.nodetagscache.get(node, [])
361
361
362 def _branchtags(self, partial, lrev):
362 def _branchtags(self, partial, lrev):
363 tiprev = self.changelog.count() - 1
363 tiprev = self.changelog.count() - 1
364 if lrev != tiprev:
364 if lrev != tiprev:
365 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367
367
368 return partial
368 return partial
369
369
370 def branchtags(self):
370 def branchtags(self):
371 tip = self.changelog.tip()
371 tip = self.changelog.tip()
372 if self.branchcache is not None and self._branchcachetip == tip:
372 if self.branchcache is not None and self._branchcachetip == tip:
373 return self.branchcache
373 return self.branchcache
374
374
375 oldtip = self._branchcachetip
375 oldtip = self._branchcachetip
376 self._branchcachetip = tip
376 self._branchcachetip = tip
377 if self.branchcache is None:
377 if self.branchcache is None:
378 self.branchcache = {} # avoid recursion in changectx
378 self.branchcache = {} # avoid recursion in changectx
379 else:
379 else:
380 self.branchcache.clear() # keep using the same dict
380 self.branchcache.clear() # keep using the same dict
381 if oldtip is None or oldtip not in self.changelog.nodemap:
381 if oldtip is None or oldtip not in self.changelog.nodemap:
382 partial, last, lrev = self._readbranchcache()
382 partial, last, lrev = self._readbranchcache()
383 else:
383 else:
384 lrev = self.changelog.rev(oldtip)
384 lrev = self.changelog.rev(oldtip)
385 partial = self._ubranchcache
385 partial = self._ubranchcache
386
386
387 self._branchtags(partial, lrev)
387 self._branchtags(partial, lrev)
388
388
389 # the branch cache is stored on disk as UTF-8, but in the local
389 # the branch cache is stored on disk as UTF-8, but in the local
390 # charset internally
390 # charset internally
391 for k, v in partial.items():
391 for k, v in partial.items():
392 self.branchcache[util.tolocal(k)] = v
392 self.branchcache[util.tolocal(k)] = v
393 self._ubranchcache = partial
393 self._ubranchcache = partial
394 return self.branchcache
394 return self.branchcache
395
395
396 def _readbranchcache(self):
396 def _readbranchcache(self):
397 partial = {}
397 partial = {}
398 try:
398 try:
399 f = self.opener("branch.cache")
399 f = self.opener("branch.cache")
400 lines = f.read().split('\n')
400 lines = f.read().split('\n')
401 f.close()
401 f.close()
402 except (IOError, OSError):
402 except (IOError, OSError):
403 return {}, nullid, nullrev
403 return {}, nullid, nullrev
404
404
405 try:
405 try:
406 last, lrev = lines.pop(0).split(" ", 1)
406 last, lrev = lines.pop(0).split(" ", 1)
407 last, lrev = bin(last), int(lrev)
407 last, lrev = bin(last), int(lrev)
408 if not (lrev < self.changelog.count() and
408 if not (lrev < self.changelog.count() and
409 self.changelog.node(lrev) == last): # sanity check
409 self.changelog.node(lrev) == last): # sanity check
410 # invalidate the cache
410 # invalidate the cache
411 raise ValueError('invalidating branch cache (tip differs)')
411 raise ValueError('invalidating branch cache (tip differs)')
412 for l in lines:
412 for l in lines:
413 if not l: continue
413 if not l: continue
414 node, label = l.split(" ", 1)
414 node, label = l.split(" ", 1)
415 partial[label.strip()] = bin(node)
415 partial[label.strip()] = bin(node)
416 except (KeyboardInterrupt, util.SignalInterrupt):
416 except (KeyboardInterrupt, util.SignalInterrupt):
417 raise
417 raise
418 except Exception, inst:
418 except Exception, inst:
419 if self.ui.debugflag:
419 if self.ui.debugflag:
420 self.ui.warn(str(inst), '\n')
420 self.ui.warn(str(inst), '\n')
421 partial, last, lrev = {}, nullid, nullrev
421 partial, last, lrev = {}, nullid, nullrev
422 return partial, last, lrev
422 return partial, last, lrev
423
423
424 def _writebranchcache(self, branches, tip, tiprev):
424 def _writebranchcache(self, branches, tip, tiprev):
425 try:
425 try:
426 f = self.opener("branch.cache", "w", atomictemp=True)
426 f = self.opener("branch.cache", "w", atomictemp=True)
427 f.write("%s %s\n" % (hex(tip), tiprev))
427 f.write("%s %s\n" % (hex(tip), tiprev))
428 for label, node in branches.iteritems():
428 for label, node in branches.iteritems():
429 f.write("%s %s\n" % (hex(node), label))
429 f.write("%s %s\n" % (hex(node), label))
430 f.rename()
430 f.rename()
431 except (IOError, OSError):
431 except (IOError, OSError):
432 pass
432 pass
433
433
434 def _updatebranchcache(self, partial, start, end):
434 def _updatebranchcache(self, partial, start, end):
435 for r in xrange(start, end):
435 for r in xrange(start, end):
436 c = self.changectx(r)
436 c = self.changectx(r)
437 b = c.branch()
437 b = c.branch()
438 partial[b] = c.node()
438 partial[b] = c.node()
439
439
440 def lookup(self, key):
440 def lookup(self, key):
441 if key == '.':
441 if key == '.':
442 key, second = self.dirstate.parents()
442 key, second = self.dirstate.parents()
443 if key == nullid:
443 if key == nullid:
444 raise repo.RepoError(_("no revision checked out"))
444 raise repo.RepoError(_("no revision checked out"))
445 if second != nullid:
445 if second != nullid:
446 self.ui.warn(_("warning: working directory has two parents, "
446 self.ui.warn(_("warning: working directory has two parents, "
447 "tag '.' uses the first\n"))
447 "tag '.' uses the first\n"))
448 elif key == 'null':
448 elif key == 'null':
449 return nullid
449 return nullid
450 n = self.changelog._match(key)
450 n = self.changelog._match(key)
451 if n:
451 if n:
452 return n
452 return n
453 if key in self.tags():
453 if key in self.tags():
454 return self.tags()[key]
454 return self.tags()[key]
455 if key in self.branchtags():
455 if key in self.branchtags():
456 return self.branchtags()[key]
456 return self.branchtags()[key]
457 n = self.changelog._partialmatch(key)
457 n = self.changelog._partialmatch(key)
458 if n:
458 if n:
459 return n
459 return n
460 try:
460 try:
461 if len(key) == 20:
461 if len(key) == 20:
462 key = hex(key)
462 key = hex(key)
463 except:
463 except:
464 pass
464 pass
465 raise repo.RepoError(_("unknown revision '%s'") % key)
465 raise repo.RepoError(_("unknown revision '%s'") % key)
466
466
467 def local(self):
467 def local(self):
468 return True
468 return True
469
469
470 def join(self, f):
470 def join(self, f):
471 return os.path.join(self.path, f)
471 return os.path.join(self.path, f)
472
472
473 def sjoin(self, f):
473 def sjoin(self, f):
474 f = self.encodefn(f)
474 f = self.encodefn(f)
475 return os.path.join(self.spath, f)
475 return os.path.join(self.spath, f)
476
476
477 def wjoin(self, f):
477 def wjoin(self, f):
478 return os.path.join(self.root, f)
478 return os.path.join(self.root, f)
479
479
480 def rjoin(self, f):
480 def rjoin(self, f):
481 return os.path.join(self.root, util.pconvert(f))
481 return os.path.join(self.root, util.pconvert(f))
482
482
483 def file(self, f):
483 def file(self, f):
484 if f[0] == '/':
484 if f[0] == '/':
485 f = f[1:]
485 f = f[1:]
486 return filelog.filelog(self.sopener, f)
486 return filelog.filelog(self.sopener, f)
487
487
488 def changectx(self, changeid=None):
488 def changectx(self, changeid=None):
489 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
490
490
491 def workingctx(self):
491 def workingctx(self):
492 return context.workingctx(self)
492 return context.workingctx(self)
493
493
494 def parents(self, changeid=None):
494 def parents(self, changeid=None):
495 '''
495 '''
496 get list of changectxs for parents of changeid or working directory
496 get list of changectxs for parents of changeid or working directory
497 '''
497 '''
498 if changeid is None:
498 if changeid is None:
499 pl = self.dirstate.parents()
499 pl = self.dirstate.parents()
500 else:
500 else:
501 n = self.changelog.lookup(changeid)
501 n = self.changelog.lookup(changeid)
502 pl = self.changelog.parents(n)
502 pl = self.changelog.parents(n)
503 if pl[1] == nullid:
503 if pl[1] == nullid:
504 return [self.changectx(pl[0])]
504 return [self.changectx(pl[0])]
505 return [self.changectx(pl[0]), self.changectx(pl[1])]
505 return [self.changectx(pl[0]), self.changectx(pl[1])]
506
506
507 def filectx(self, path, changeid=None, fileid=None):
507 def filectx(self, path, changeid=None, fileid=None):
508 """changeid can be a changeset revision, node, or tag.
508 """changeid can be a changeset revision, node, or tag.
509 fileid can be a file revision or node."""
509 fileid can be a file revision or node."""
510 return context.filectx(self, path, changeid, fileid)
510 return context.filectx(self, path, changeid, fileid)
511
511
512 def getcwd(self):
512 def getcwd(self):
513 return self.dirstate.getcwd()
513 return self.dirstate.getcwd()
514
514
515 def pathto(self, f, cwd=None):
515 def pathto(self, f, cwd=None):
516 return self.dirstate.pathto(f, cwd)
516 return self.dirstate.pathto(f, cwd)
517
517
518 def wfile(self, f, mode='r'):
518 def wfile(self, f, mode='r'):
519 return self.wopener(f, mode)
519 return self.wopener(f, mode)
520
520
521 def _link(self, f):
521 def _link(self, f):
522 return os.path.islink(self.wjoin(f))
522 return os.path.islink(self.wjoin(f))
523
523
524 def _filter(self, filter, filename, data):
524 def _filter(self, filter, filename, data):
525 if filter not in self.filterpats:
525 if filter not in self.filterpats:
526 l = []
526 l = []
527 for pat, cmd in self.ui.configitems(filter):
527 for pat, cmd in self.ui.configitems(filter):
528 mf = util.matcher(self.root, "", [pat], [], [])[1]
528 mf = util.matcher(self.root, "", [pat], [], [])[1]
529 fn = None
529 fn = None
530 params = cmd
530 params = cmd
531 for name, filterfn in self._datafilters.iteritems():
531 for name, filterfn in self._datafilters.iteritems():
532 if cmd.startswith(name):
532 if cmd.startswith(name):
533 fn = filterfn
533 fn = filterfn
534 params = cmd[len(name):].lstrip()
534 params = cmd[len(name):].lstrip()
535 break
535 break
536 if not fn:
536 if not fn:
537 fn = lambda s, c, **kwargs: util.filter(s, c)
537 fn = lambda s, c, **kwargs: util.filter(s, c)
538 # Wrap old filters not supporting keyword arguments
538 # Wrap old filters not supporting keyword arguments
539 if not inspect.getargspec(fn)[2]:
539 if not inspect.getargspec(fn)[2]:
540 oldfn = fn
540 oldfn = fn
541 fn = lambda s, c, **kwargs: oldfn(s, c)
541 fn = lambda s, c, **kwargs: oldfn(s, c)
542 l.append((mf, fn, params))
542 l.append((mf, fn, params))
543 self.filterpats[filter] = l
543 self.filterpats[filter] = l
544
544
545 for mf, fn, cmd in self.filterpats[filter]:
545 for mf, fn, cmd in self.filterpats[filter]:
546 if mf(filename):
546 if mf(filename):
547 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
547 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
548 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
548 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
549 break
549 break
550
550
551 return data
551 return data
552
552
553 def adddatafilter(self, name, filter):
553 def adddatafilter(self, name, filter):
554 self._datafilters[name] = filter
554 self._datafilters[name] = filter
555
555
556 def wread(self, filename):
556 def wread(self, filename):
557 if self._link(filename):
557 if self._link(filename):
558 data = os.readlink(self.wjoin(filename))
558 data = os.readlink(self.wjoin(filename))
559 else:
559 else:
560 data = self.wopener(filename, 'r').read()
560 data = self.wopener(filename, 'r').read()
561 return self._filter("encode", filename, data)
561 return self._filter("encode", filename, data)
562
562
563 def wwrite(self, filename, data, flags):
563 def wwrite(self, filename, data, flags):
564 data = self._filter("decode", filename, data)
564 data = self._filter("decode", filename, data)
565 try:
565 try:
566 os.unlink(self.wjoin(filename))
566 os.unlink(self.wjoin(filename))
567 except OSError:
567 except OSError:
568 pass
568 pass
569 self.wopener(filename, 'w').write(data)
569 self.wopener(filename, 'w').write(data)
570 util.set_flags(self.wjoin(filename), flags)
570 util.set_flags(self.wjoin(filename), flags)
571
571
572 def wwritedata(self, filename, data):
572 def wwritedata(self, filename, data):
573 return self._filter("decode", filename, data)
573 return self._filter("decode", filename, data)
574
574
575 def transaction(self):
575 def transaction(self):
576 if self._transref and self._transref():
576 if self._transref and self._transref():
577 return self._transref().nest()
577 return self._transref().nest()
578
578
579 # abort here if the journal already exists
579 # abort here if the journal already exists
580 if os.path.exists(self.sjoin("journal")):
580 if os.path.exists(self.sjoin("journal")):
581 raise repo.RepoError(_("journal already exists - run hg recover"))
581 raise repo.RepoError(_("journal already exists - run hg recover"))
582
582
583 # save dirstate for rollback
583 # save dirstate for rollback
584 try:
584 try:
585 ds = self.opener("dirstate").read()
585 ds = self.opener("dirstate").read()
586 except IOError:
586 except IOError:
587 ds = ""
587 ds = ""
588 self.opener("journal.dirstate", "w").write(ds)
588 self.opener("journal.dirstate", "w").write(ds)
589 self.opener("journal.branch", "w").write(self.dirstate.branch())
589 self.opener("journal.branch", "w").write(self.dirstate.branch())
590
590
591 renames = [(self.sjoin("journal"), self.sjoin("undo")),
591 renames = [(self.sjoin("journal"), self.sjoin("undo")),
592 (self.join("journal.dirstate"), self.join("undo.dirstate")),
592 (self.join("journal.dirstate"), self.join("undo.dirstate")),
593 (self.join("journal.branch"), self.join("undo.branch"))]
593 (self.join("journal.branch"), self.join("undo.branch"))]
594 tr = transaction.transaction(self.ui.warn, self.sopener,
594 tr = transaction.transaction(self.ui.warn, self.sopener,
595 self.sjoin("journal"),
595 self.sjoin("journal"),
596 aftertrans(renames),
596 aftertrans(renames),
597 self._createmode)
597 self._createmode)
598 self._transref = weakref.ref(tr)
598 self._transref = weakref.ref(tr)
599 return tr
599 return tr
600
600
601 def recover(self):
601 def recover(self):
602 l = self.lock()
602 l = self.lock()
603 try:
603 try:
604 if os.path.exists(self.sjoin("journal")):
604 if os.path.exists(self.sjoin("journal")):
605 self.ui.status(_("rolling back interrupted transaction\n"))
605 self.ui.status(_("rolling back interrupted transaction\n"))
606 transaction.rollback(self.sopener, self.sjoin("journal"))
606 transaction.rollback(self.sopener, self.sjoin("journal"))
607 self.invalidate()
607 self.invalidate()
608 return True
608 return True
609 else:
609 else:
610 self.ui.warn(_("no interrupted transaction available\n"))
610 self.ui.warn(_("no interrupted transaction available\n"))
611 return False
611 return False
612 finally:
612 finally:
613 del l
613 del l
614
614
615 def rollback(self):
615 def rollback(self):
616 wlock = lock = None
616 wlock = lock = None
617 try:
617 try:
618 wlock = self.wlock()
618 wlock = self.wlock()
619 lock = self.lock()
619 lock = self.lock()
620 if os.path.exists(self.sjoin("undo")):
620 if os.path.exists(self.sjoin("undo")):
621 self.ui.status(_("rolling back last transaction\n"))
621 self.ui.status(_("rolling back last transaction\n"))
622 transaction.rollback(self.sopener, self.sjoin("undo"))
622 transaction.rollback(self.sopener, self.sjoin("undo"))
623 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
624 try:
624 try:
625 branch = self.opener("undo.branch").read()
625 branch = self.opener("undo.branch").read()
626 self.dirstate.setbranch(branch)
626 self.dirstate.setbranch(branch)
627 except IOError:
627 except IOError:
628 self.ui.warn(_("Named branch could not be reset, "
628 self.ui.warn(_("Named branch could not be reset, "
629 "current branch still is: %s\n")
629 "current branch still is: %s\n")
630 % util.tolocal(self.dirstate.branch()))
630 % util.tolocal(self.dirstate.branch()))
631 self.invalidate()
631 self.invalidate()
632 self.dirstate.invalidate()
632 self.dirstate.invalidate()
633 else:
633 else:
634 self.ui.warn(_("no rollback information available\n"))
634 self.ui.warn(_("no rollback information available\n"))
635 finally:
635 finally:
636 del lock, wlock
636 del lock, wlock
637
637
638 def invalidate(self):
638 def invalidate(self):
639 for a in "changelog manifest".split():
639 for a in "changelog manifest".split():
640 if a in self.__dict__:
640 if a in self.__dict__:
641 delattr(self, a)
641 delattr(self, a)
642 self.tagscache = None
642 self.tagscache = None
643 self._tagstypecache = None
643 self._tagstypecache = None
644 self.nodetagscache = None
644 self.nodetagscache = None
645 self.branchcache = None
645 self.branchcache = None
646 self._ubranchcache = None
646 self._ubranchcache = None
647 self._branchcachetip = None
647 self._branchcachetip = None
648
648
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

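    # Both lock() and wlock() cache the live lock object through a weak
    # reference: a nested call reuses the existing lock instead of
    # deadlocking, and the lock is released as soon as the last strong
    # reference goes away (hence the "del lock" pattern used throughout
    # this class).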
    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

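    # commit() below proceeds in four broad steps, mirroring its inline
    # section comments: decide which files to commit (from an explicit list
    # or from dirstate/status), check the files in via filecommit(), write
    # the new manifest, and finally add the changeset and update the
    # dirstate once everything has succeeded.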
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

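    # walk() yields matching filenames: for an explicit node it scans that
    # revision's manifest (treating exact matches as directory prefixes and
    # reporting names that match nothing in the manifest); otherwise it
    # defers to dirstate.walk for the working directory.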
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''

        if node:
            fdict = dict.fromkeys(match.files())
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if match.bad(fn, 'No such file in rev ' + short(node)) \
                   and match(fn):
                    yield fn
        else:
            for fn in self.dirstate.walk(match):
                yield fn

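    # status() returns a 7-tuple of sorted file lists, in this order:
    # (modified, added, removed, deleted, unknown, ignored, clean).
    # The ignored and clean lists are only populated when list_ignored/
    # list_clean are set; unknown is controlled by list_unknown, which is
    # on by default.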
    def status(self, node1=None, node2=None, match=None,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if not match:
            match = match_.always(self.root, self.getcwd())

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(match, list_ignored,
                                                    list_clean, list_unknown)
            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

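    # branches() describes linear segments of history for the discovery
    # protocol: for each starting node it walks first parents until it hits
    # a merge or a root, and reports the tuple (segment head, segment root,
    # root's first parent, root's second parent).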
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

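    # between() samples each (top, bottom) first-parent chain at
    # exponentially growing distances from top (1, 2, 4, 8, ...), giving
    # the caller logarithmically many probe points for the binary search
    # performed in findincoming() below.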
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

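    # Discovery proceeds in three phases, visible in findincoming() below:
    # ask the remote for its heads, expand unknown heads into linear branch
    # segments via remote.branches(), then binary-search the partially
    # known segments with remote.between() until the boundary between
    # known and missing history is found.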
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

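    # prepush() computes what would be pushed and returns a pair:
    # (changegroup, remote heads) when there is something to send, or
    # (None, return code) when the push should stop early -- (None, 1)
    # for "no changes found" and (None, 0) when new remote heads would
    # be created without force.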
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1585 def changegroupsubset(self, bases, heads, source, extranodes=None):
1585 def changegroupsubset(self, bases, heads, source, extranodes=None):
1586 """This function generates a changegroup consisting of all the nodes
1586 """This function generates a changegroup consisting of all the nodes
1587 that are descendents of any of the bases, and ancestors of any of
1587 that are descendents of any of the bases, and ancestors of any of
1588 the heads.
1588 the heads.
1589
1589
1590 It is fairly complex as determining which filenodes and which
1590 It is fairly complex as determining which filenodes and which
1591 manifest nodes need to be included for the changeset to be complete
1591 manifest nodes need to be included for the changeset to be complete
1592 is non-trivial.
1592 is non-trivial.
1593
1593
1594 Another wrinkle is doing the reverse, figuring out which changeset in
1594 Another wrinkle is doing the reverse, figuring out which changeset in
1595 the changegroup a particular filenode or manifestnode belongs to.
1595 the changegroup a particular filenode or manifestnode belongs to.
1596
1596
1597 The caller can specify some nodes that must be included in the
1597 The caller can specify some nodes that must be included in the
1598 changegroup using the extranodes argument. It should be a dict
1598 changegroup using the extranodes argument. It should be a dict
1599 where the keys are the filenames (or 1 for the manifest), and the
1599 where the keys are the filenames (or 1 for the manifest), and the
1600 values are lists of (node, linknode) tuples, where node is a wanted
1600 values are lists of (node, linknode) tuples, where node is a wanted
1601 node and linknode is the changelog node that should be transmitted as
1601 node and linknode is the changelog node that should be transmitted as
1602 the linkrev.
1602 the linkrev.
1603 """
1603 """
1604
1604
1605 self.hook('preoutgoing', throw=True, source=source)
1605 self.hook('preoutgoing', throw=True, source=source)
1606
1606
1607 # Set up some initial variables
1607 # Set up some initial variables
1608 # Make it easy to refer to self.changelog
1608 # Make it easy to refer to self.changelog
1609 cl = self.changelog
1609 cl = self.changelog
1610 # msng is short for missing - compute the list of changesets in this
1610 # msng is short for missing - compute the list of changesets in this
1611 # changegroup.
1611 # changegroup.
1612 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1612 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1613 self.changegroupinfo(msng_cl_lst, source)
1613 self.changegroupinfo(msng_cl_lst, source)
1614 # Some bases may turn out to be superfluous, and some heads may be
1614 # Some bases may turn out to be superfluous, and some heads may be
1615 # too. nodesbetween will return the minimal set of bases and heads
1615 # too. nodesbetween will return the minimal set of bases and heads
1616 # necessary to re-create the changegroup.
1616 # necessary to re-create the changegroup.
1617
1617
1618 # Known heads are the list of heads that it is assumed the recipient
1618 # Known heads are the list of heads that it is assumed the recipient
1619 # of this changegroup will know about.
1619 # of this changegroup will know about.
1620 knownheads = {}
1620 knownheads = {}
1621 # We assume that all parents of bases are known heads.
1621 # We assume that all parents of bases are known heads.
1622 for n in bases:
1622 for n in bases:
1623 for p in cl.parents(n):
1623 for p in cl.parents(n):
1624 if p != nullid:
1624 if p != nullid:
1625 knownheads[p] = 1
1625 knownheads[p] = 1
1626 knownheads = knownheads.keys()
1626 knownheads = knownheads.keys()
1627 if knownheads:
1627 if knownheads:
1628 # Now that we know what heads are known, we can compute which
1628 # Now that we know what heads are known, we can compute which
1629 # changesets are known. The recipient must know about all
1629 # changesets are known. The recipient must know about all
1630 # changesets required to reach the known heads from the null
1630 # changesets required to reach the known heads from the null
1631 # changeset.
1631 # changeset.
1632 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1632 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1633 junk = None
1633 junk = None
1634 # Transform the list into an ersatz set.
1634 # Transform the list into an ersatz set.
1635 has_cl_set = dict.fromkeys(has_cl_set)
1635 has_cl_set = dict.fromkeys(has_cl_set)
1636 else:
1636 else:
1637 # If there were no known heads, the recipient cannot be assumed to
1637 # If there were no known heads, the recipient cannot be assumed to
1638 # know about any changesets.
1638 # know about any changesets.
1639 has_cl_set = {}
1639 has_cl_set = {}
1640
1640
1641 # Make it easy to refer to self.manifest
1641 # Make it easy to refer to self.manifest
1642 mnfst = self.manifest
1642 mnfst = self.manifest
1643 # We don't know which manifests are missing yet
1643 # We don't know which manifests are missing yet
1644 msng_mnfst_set = {}
1644 msng_mnfst_set = {}
1645 # Nor do we know which filenodes are missing.
1645 # Nor do we know which filenodes are missing.
1646 msng_filenode_set = {}
1646 msng_filenode_set = {}
1647
1647
1648 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1648 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1649 junk = None
1649 junk = None
1650
1650
1651 # A changeset always belongs to itself, so the changenode lookup
1651 # A changeset always belongs to itself, so the changenode lookup
1652 # function for a changenode is identity.
1652 # function for a changenode is identity.
1653 def identity(x):
1653 def identity(x):
1654 return x
1654 return x
1655
1655
1656 # A function generating function. Sets up an environment for the
1656 # A function generating function. Sets up an environment for the
1657 # inner function.
1657 # inner function.
1658 def cmp_by_rev_func(revlog):
1658 def cmp_by_rev_func(revlog):
1659 # Compare two nodes by their revision number in the environment's
1659 # Compare two nodes by their revision number in the environment's
1660 # revision history. Since the revision number both represents the
1660 # revision history. Since the revision number both represents the
1661 # most efficient order to read the nodes in, and represents a
1661 # most efficient order to read the nodes in, and represents a
1662 # topological sorting of the nodes, this function is often useful.
1662 # topological sorting of the nodes, this function is often useful.
1663 def cmp_by_rev(a, b):
1663 def cmp_by_rev(a, b):
1664 return cmp(revlog.rev(a), revlog.rev(b))
1664 return cmp(revlog.rev(a), revlog.rev(b))
1665 return cmp_by_rev
1665 return cmp_by_rev
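# Usage sketch: the factory binds one revlog so the resulting comparator
# can be handed straight to list.sort(), as the callers below do:
#
#     nodes.sort(cmp_by_rev_func(cl))   # changelog nodes, by rev number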
1666
1666
1667 # If we determine that a particular file or manifest node must be a
1667 # If we determine that a particular file or manifest node must be a
1668 # node that the recipient of the changegroup will already have, we can
1668 # node that the recipient of the changegroup will already have, we can
1669 # also assume the recipient will have all the parents. This function
1669 # also assume the recipient will have all the parents. This function
1670 # prunes them from the set of missing nodes.
1670 # prunes them from the set of missing nodes.
1671 def prune_parents(revlog, hasset, msngset):
1671 def prune_parents(revlog, hasset, msngset):
1672 haslst = hasset.keys()
1672 haslst = hasset.keys()
1673 haslst.sort(cmp_by_rev_func(revlog))
1673 haslst.sort(cmp_by_rev_func(revlog))
1674 for node in haslst:
1674 for node in haslst:
1675 parentlst = [p for p in revlog.parents(node) if p != nullid]
1675 parentlst = [p for p in revlog.parents(node) if p != nullid]
1676 while parentlst:
1676 while parentlst:
1677 n = parentlst.pop()
1677 n = parentlst.pop()
1678 if n not in hasset:
1678 if n not in hasset:
1679 hasset[n] = 1
1679 hasset[n] = 1
1680 p = [p for p in revlog.parents(n) if p != nullid]
1680 p = [p for p in revlog.parents(n) if p != nullid]
1681 parentlst.extend(p)
1681 parentlst.extend(p)
1682 for n in hasset:
1682 for n in hasset:
1683 msngset.pop(n, None)
1683 msngset.pop(n, None)
1684
1684
1685 # This is a function generating function used to set up an environment
1685 # This is a function generating function used to set up an environment
1686 # for the inner function to execute in.
1686 # for the inner function to execute in.
1687 def manifest_and_file_collector(changedfileset):
1687 def manifest_and_file_collector(changedfileset):
1688 # This is an information gathering function that gathers
1688 # This is an information gathering function that gathers
1689 # information from each changeset node that goes out as part of
1689 # information from each changeset node that goes out as part of
1690 # the changegroup. The information gathered is a list of which
1690 # the changegroup. The information gathered is a list of which
1691 # manifest nodes are potentially required (the recipient may
1691 # manifest nodes are potentially required (the recipient may
1692 # already have them) and total list of all files which were
1692 # already have them) and total list of all files which were
1693 # changed in any changeset in the changegroup.
1693 # changed in any changeset in the changegroup.
1694 #
1694 #
1695 # We also remember the first changenode we saw any manifest
1695 # We also remember the first changenode we saw any manifest
1696 # referenced by so we can later determine which changenode 'owns'
1696 # referenced by so we can later determine which changenode 'owns'
1697 # the manifest.
1697 # the manifest.
1698 def collect_manifests_and_files(clnode):
1698 def collect_manifests_and_files(clnode):
1699 c = cl.read(clnode)
1699 c = cl.read(clnode)
1700 for f in c[3]:
1700 for f in c[3]:
1701 # This is to make sure we only have one instance of each
1701 # This is to make sure we only have one instance of each
1702 # filename string for each filename.
1702 # filename string for each filename.
1703 changedfileset.setdefault(f, f)
1703 changedfileset.setdefault(f, f)
1704 msng_mnfst_set.setdefault(c[0], clnode)
1704 msng_mnfst_set.setdefault(c[0], clnode)
1705 return collect_manifests_and_files
1705 return collect_manifests_and_files
1706
1706
1707 # Figure out which manifest nodes (of the ones we think might be part
1707 # Figure out which manifest nodes (of the ones we think might be part
1708 # of the changegroup) the recipient must know about and remove them
1708 # of the changegroup) the recipient must know about and remove them
1709 # from the changegroup.
1709 # from the changegroup.
1710 def prune_manifests():
1710 def prune_manifests():
1711 has_mnfst_set = {}
1711 has_mnfst_set = {}
1712 for n in msng_mnfst_set:
1712 for n in msng_mnfst_set:
1713 # If a 'missing' manifest thinks it belongs to a changenode
1713 # If a 'missing' manifest thinks it belongs to a changenode
1714 # the recipient is assumed to have, obviously the recipient
1714 # the recipient is assumed to have, obviously the recipient
1715 # must have that manifest.
1715 # must have that manifest.
1716 linknode = cl.node(mnfst.linkrev(n))
1716 linknode = cl.node(mnfst.linkrev(n))
1717 if linknode in has_cl_set:
1717 if linknode in has_cl_set:
1718 has_mnfst_set[n] = 1
1718 has_mnfst_set[n] = 1
1719 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1719 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1720
1720
1721 # Use the information collected in collect_manifests_and_files to say
1721 # Use the information collected in collect_manifests_and_files to say
1722 # which changenode any manifestnode belongs to.
1722 # which changenode any manifestnode belongs to.
1723 def lookup_manifest_link(mnfstnode):
1723 def lookup_manifest_link(mnfstnode):
1724 return msng_mnfst_set[mnfstnode]
1724 return msng_mnfst_set[mnfstnode]
1725
1725
1726 # A function generating function that sets up the initial environment
1726 # A function generating function that sets up the initial environment
1727 # for the inner function.
1727 # for the inner function.
1728 def filenode_collector(changedfiles):
1728 def filenode_collector(changedfiles):
1729 next_rev = [0]
1729 next_rev = [0]
1730 # This gathers information from each manifestnode included in the
1730 # This gathers information from each manifestnode included in the
1731 # changegroup about which filenodes the manifest node references
1731 # changegroup about which filenodes the manifest node references
1732 # so we can include those in the changegroup too.
1732 # so we can include those in the changegroup too.
1733 #
1733 #
1734 # It also remembers which changenode each filenode belongs to. It
1734 # It also remembers which changenode each filenode belongs to. It
1735 # does this by assuming that a filenode belongs to the changenode
1735 # does this by assuming that a filenode belongs to the changenode
1736 # owning the first manifest that references it.
1736 # owning the first manifest that references it.
1737 def collect_msng_filenodes(mnfstnode):
1737 def collect_msng_filenodes(mnfstnode):
1738 r = mnfst.rev(mnfstnode)
1738 r = mnfst.rev(mnfstnode)
1739 if r == next_rev[0]:
1739 if r == next_rev[0]:
1740 # If the last rev we looked at was the one just previous,
1740 # If the last rev we looked at was the one just previous,
1741 # we only need to see a diff.
1741 # we only need to see a diff.
1742 deltamf = mnfst.readdelta(mnfstnode)
1742 deltamf = mnfst.readdelta(mnfstnode)
1743 # For each line in the delta
1743 # For each line in the delta
1744 for f, fnode in deltamf.items():
1744 for f, fnode in deltamf.items():
1745 f = changedfiles.get(f, None)
1745 f = changedfiles.get(f, None)
1746 # And if the file is in the list of files we care
1746 # And if the file is in the list of files we care
1747 # about.
1747 # about.
1748 if f is not None:
1748 if f is not None:
1749 # Get the changenode this manifest belongs to
1749 # Get the changenode this manifest belongs to
1750 clnode = msng_mnfst_set[mnfstnode]
1750 clnode = msng_mnfst_set[mnfstnode]
1751 # Create the set of filenodes for the file if
1751 # Create the set of filenodes for the file if
1752 # there isn't one already.
1752 # there isn't one already.
1753 ndset = msng_filenode_set.setdefault(f, {})
1753 ndset = msng_filenode_set.setdefault(f, {})
1754 # And set the filenode's changelog node to the
1754 # And set the filenode's changelog node to the
1755 # manifest's if it hasn't been set already.
1755 # manifest's if it hasn't been set already.
1756 ndset.setdefault(fnode, clnode)
1756 ndset.setdefault(fnode, clnode)
1757 else:
1757 else:
1758 # Otherwise we need a full manifest.
1758 # Otherwise we need a full manifest.
1759 m = mnfst.read(mnfstnode)
1759 m = mnfst.read(mnfstnode)
1760 # For every file we care about.
1760 # For every file we care about.
1761 for f in changedfiles:
1761 for f in changedfiles:
1762 fnode = m.get(f, None)
1762 fnode = m.get(f, None)
1763 # If it's in the manifest
1763 # If it's in the manifest
1764 if fnode is not None:
1764 if fnode is not None:
1765 # See comments above.
1765 # See comments above.
1766 clnode = msng_mnfst_set[mnfstnode]
1766 clnode = msng_mnfst_set[mnfstnode]
1767 ndset = msng_filenode_set.setdefault(f, {})
1767 ndset = msng_filenode_set.setdefault(f, {})
1768 ndset.setdefault(fnode, clnode)
1768 ndset.setdefault(fnode, clnode)
1769 # Remember the revision we hope to see next.
1769 # Remember the revision we hope to see next.
1770 next_rev[0] = r + 1
1770 next_rev[0] = r + 1
1771 return collect_msng_filenodes
1771 return collect_msng_filenodes
1772
1772
1773 # We have a list of filenodes we think we need for a file; let's remove
1773 # We have a list of filenodes we think we need for a file; let's remove
1774 # all those we know the recipient must have.
1774 # all those we know the recipient must have.
1775 def prune_filenodes(f, filerevlog):
1775 def prune_filenodes(f, filerevlog):
1776 msngset = msng_filenode_set[f]
1776 msngset = msng_filenode_set[f]
1777 hasset = {}
1777 hasset = {}
1778 # If a 'missing' filenode thinks it belongs to a changenode we
1778 # If a 'missing' filenode thinks it belongs to a changenode we
1779 # assume the recipient must have, then the recipient must have
1779 # assume the recipient must have, then the recipient must have
1780 # that filenode.
1780 # that filenode.
1781 for n in msngset:
1781 for n in msngset:
1782 clnode = cl.node(filerevlog.linkrev(n))
1782 clnode = cl.node(filerevlog.linkrev(n))
1783 if clnode in has_cl_set:
1783 if clnode in has_cl_set:
1784 hasset[n] = 1
1784 hasset[n] = 1
1785 prune_parents(filerevlog, hasset, msngset)
1785 prune_parents(filerevlog, hasset, msngset)
1786
1786
1787 # A function generating function that sets up a context for the
1787 # A function generating function that sets up a context for the
1788 # inner function.
1788 # inner function.
1789 def lookup_filenode_link_func(fname):
1789 def lookup_filenode_link_func(fname):
1790 msngset = msng_filenode_set[fname]
1790 msngset = msng_filenode_set[fname]
1791 # Lookup the changenode the filenode belongs to.
1791 # Lookup the changenode the filenode belongs to.
1792 def lookup_filenode_link(fnode):
1792 def lookup_filenode_link(fnode):
1793 return msngset[fnode]
1793 return msngset[fnode]
1794 return lookup_filenode_link
1794 return lookup_filenode_link
1795
1795
1796 # Add the nodes that were explicitly requested.
1796 # Add the nodes that were explicitly requested.
1797 def add_extra_nodes(name, nodes):
1797 def add_extra_nodes(name, nodes):
1798 if not extranodes or name not in extranodes:
1798 if not extranodes or name not in extranodes:
1799 return
1799 return
1800
1800
1801 for node, linknode in extranodes[name]:
1801 for node, linknode in extranodes[name]:
1802 if node not in nodes:
1802 if node not in nodes:
1803 nodes[node] = linknode
1803 nodes[node] = linknode
1804
1804
1805 # Now that we have all these utility functions to help out and
1805 # Now that we have all these utility functions to help out and
1806 # logically divide up the task, generate the group.
1806 # logically divide up the task, generate the group.
1807 def gengroup():
1807 def gengroup():
1808 # The set of changed files starts empty.
1808 # The set of changed files starts empty.
1809 changedfiles = {}
1809 changedfiles = {}
1810 # Create a changenode group generator that will call our functions
1810 # Create a changenode group generator that will call our functions
1811 # back to lookup the owning changenode and collect information.
1811 # back to lookup the owning changenode and collect information.
1812 group = cl.group(msng_cl_lst, identity,
1812 group = cl.group(msng_cl_lst, identity,
1813 manifest_and_file_collector(changedfiles))
1813 manifest_and_file_collector(changedfiles))
1814 for chnk in group:
1814 for chnk in group:
1815 yield chnk
1815 yield chnk
1816
1816
1817 # The list of manifests has been collected by the generator
1817 # The list of manifests has been collected by the generator
1818 # calling our functions back.
1818 # calling our functions back.
1819 prune_manifests()
1819 prune_manifests()
1820 add_extra_nodes(1, msng_mnfst_set)
1820 add_extra_nodes(1, msng_mnfst_set)
1821 msng_mnfst_lst = msng_mnfst_set.keys()
1821 msng_mnfst_lst = msng_mnfst_set.keys()
1822 # Sort the manifestnodes by revision number.
1822 # Sort the manifestnodes by revision number.
1823 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1823 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1824 # Create a generator for the manifestnodes that calls our lookup
1824 # Create a generator for the manifestnodes that calls our lookup
1825 # and data collection functions back.
1825 # and data collection functions back.
1826 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1826 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1827 filenode_collector(changedfiles))
1827 filenode_collector(changedfiles))
1828 for chnk in group:
1828 for chnk in group:
1829 yield chnk
1829 yield chnk
1830
1830
1831 # These are no longer needed, dereference and toss the memory for
1831 # These are no longer needed, dereference and toss the memory for
1832 # them.
1832 # them.
1833 msng_mnfst_lst = None
1833 msng_mnfst_lst = None
1834 msng_mnfst_set.clear()
1834 msng_mnfst_set.clear()
1835
1835
1836 if extranodes:
1836 if extranodes:
1837 for fname in extranodes:
1837 for fname in extranodes:
1838 if isinstance(fname, int):
1838 if isinstance(fname, int):
1839 continue
1839 continue
1840 add_extra_nodes(fname,
1840 add_extra_nodes(fname,
1841 msng_filenode_set.setdefault(fname, {}))
1841 msng_filenode_set.setdefault(fname, {}))
1842 changedfiles[fname] = 1
1842 changedfiles[fname] = 1
1843 changedfiles = changedfiles.keys()
1843 changedfiles = changedfiles.keys()
1844 changedfiles.sort()
1844 changedfiles.sort()
1845 # Go through all our files in order sorted by name.
1845 # Go through all our files in order sorted by name.
1846 for fname in changedfiles:
1846 for fname in changedfiles:
1847 filerevlog = self.file(fname)
1847 filerevlog = self.file(fname)
1848 if filerevlog.count() == 0:
1848 if filerevlog.count() == 0:
1849 raise util.Abort(_("empty or missing revlog for %s") % fname)
1849 raise util.Abort(_("empty or missing revlog for %s") % fname)
1850 # Toss out the filenodes that the recipient isn't really
1850 # Toss out the filenodes that the recipient isn't really
1851 # missing.
1851 # missing.
1852 if fname in msng_filenode_set:
1852 if fname in msng_filenode_set:
1853 prune_filenodes(fname, filerevlog)
1853 prune_filenodes(fname, filerevlog)
1854 msng_filenode_lst = msng_filenode_set[fname].keys()
1854 msng_filenode_lst = msng_filenode_set[fname].keys()
1855 else:
1855 else:
1856 msng_filenode_lst = []
1856 msng_filenode_lst = []
1857 # If any filenodes are left, generate the group for them,
1857 # If any filenodes are left, generate the group for them,
1858 # otherwise don't bother.
1858 # otherwise don't bother.
1859 if len(msng_filenode_lst) > 0:
1859 if len(msng_filenode_lst) > 0:
1860 yield changegroup.chunkheader(len(fname))
1860 yield changegroup.chunkheader(len(fname))
1861 yield fname
1861 yield fname
1862 # Sort the filenodes by their revision #
1862 # Sort the filenodes by their revision #
1863 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1863 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1864 # Create a group generator and only pass in a changenode
1864 # Create a group generator and only pass in a changenode
1865 # lookup function as we need to collect no information
1865 # lookup function as we need to collect no information
1866 # from filenodes.
1866 # from filenodes.
1867 group = filerevlog.group(msng_filenode_lst,
1867 group = filerevlog.group(msng_filenode_lst,
1868 lookup_filenode_link_func(fname))
1868 lookup_filenode_link_func(fname))
1869 for chnk in group:
1869 for chnk in group:
1870 yield chnk
1870 yield chnk
1871 if fname in msng_filenode_set:
1871 if fname in msng_filenode_set:
1872 # Don't need this anymore, toss it to free memory.
1872 # Don't need this anymore, toss it to free memory.
1873 del msng_filenode_set[fname]
1873 del msng_filenode_set[fname]
1874 # Signal that no more groups are left.
1874 # Signal that no more groups are left.
1875 yield changegroup.closechunk()
1875 yield changegroup.closechunk()
1876
1876
1877 if msng_cl_lst:
1877 if msng_cl_lst:
1878 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1878 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1879
1879
1880 return util.chunkbuffer(gengroup())
1880 return util.chunkbuffer(gengroup())
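# A sketch of the stream gengroup() assembles (reconstructed from the
# yields above, not a formal spec). Each group is a run of delta chunks
# terminated by an empty chunk from changegroup.closechunk(), which is
# how chunkiter on the receiving side knows the group has ended:
#
#     changelog group
#     manifest group
#     for each changed file:
#         chunkheader(len(fname)) + fname
#         filelog group
#     closechunk()    # empty chunk: no more file groups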
1881
1881
1882 def changegroup(self, basenodes, source):
1882 def changegroup(self, basenodes, source):
1883 """Generate a changegroup of all nodes that we have that a recipient
1883 """Generate a changegroup of all nodes that we have that a recipient
1884 doesn't.
1884 doesn't.
1885
1885
1886 This is much easier than the previous function as we can assume that
1886 This is much easier than the previous function as we can assume that
1887 the recipient has any changenode we aren't sending them."""
1887 the recipient has any changenode we aren't sending them."""
1888
1888
1889 self.hook('preoutgoing', throw=True, source=source)
1889 self.hook('preoutgoing', throw=True, source=source)
1890
1890
1891 cl = self.changelog
1891 cl = self.changelog
1892 nodes = cl.nodesbetween(basenodes, None)[0]
1892 nodes = cl.nodesbetween(basenodes, None)[0]
1893 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1893 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1894 self.changegroupinfo(nodes, source)
1894 self.changegroupinfo(nodes, source)
1895
1895
1896 def identity(x):
1896 def identity(x):
1897 return x
1897 return x
1898
1898
1899 def gennodelst(revlog):
1899 def gennodelst(revlog):
1900 for r in xrange(0, revlog.count()):
1900 for r in xrange(0, revlog.count()):
1901 n = revlog.node(r)
1901 n = revlog.node(r)
1902 if revlog.linkrev(n) in revset:
1902 if revlog.linkrev(n) in revset:
1903 yield n
1903 yield n
1904
1904
1905 def changed_file_collector(changedfileset):
1905 def changed_file_collector(changedfileset):
1906 def collect_changed_files(clnode):
1906 def collect_changed_files(clnode):
1907 c = cl.read(clnode)
1907 c = cl.read(clnode)
1908 for fname in c[3]:
1908 for fname in c[3]:
1909 changedfileset[fname] = 1
1909 changedfileset[fname] = 1
1910 return collect_changed_files
1910 return collect_changed_files
1911
1911
1912 def lookuprevlink_func(revlog):
1912 def lookuprevlink_func(revlog):
1913 def lookuprevlink(n):
1913 def lookuprevlink(n):
1914 return cl.node(revlog.linkrev(n))
1914 return cl.node(revlog.linkrev(n))
1915 return lookuprevlink
1915 return lookuprevlink
1916
1916
1917 def gengroup():
1917 def gengroup():
1918 # construct a list of all changed files
1918 # construct a list of all changed files
1919 changedfiles = {}
1919 changedfiles = {}
1920
1920
1921 for chnk in cl.group(nodes, identity,
1921 for chnk in cl.group(nodes, identity,
1922 changed_file_collector(changedfiles)):
1922 changed_file_collector(changedfiles)):
1923 yield chnk
1923 yield chnk
1924 changedfiles = changedfiles.keys()
1924 changedfiles = changedfiles.keys()
1925 changedfiles.sort()
1925 changedfiles.sort()
1926
1926
1927 mnfst = self.manifest
1927 mnfst = self.manifest
1928 nodeiter = gennodelst(mnfst)
1928 nodeiter = gennodelst(mnfst)
1929 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1929 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1930 yield chnk
1930 yield chnk
1931
1931
1932 for fname in changedfiles:
1932 for fname in changedfiles:
1933 filerevlog = self.file(fname)
1933 filerevlog = self.file(fname)
1934 if filerevlog.count() == 0:
1934 if filerevlog.count() == 0:
1935 raise util.Abort(_("empty or missing revlog for %s") % fname)
1935 raise util.Abort(_("empty or missing revlog for %s") % fname)
1936 nodeiter = gennodelst(filerevlog)
1936 nodeiter = gennodelst(filerevlog)
1937 nodeiter = list(nodeiter)
1937 nodeiter = list(nodeiter)
1938 if nodeiter:
1938 if nodeiter:
1939 yield changegroup.chunkheader(len(fname))
1939 yield changegroup.chunkheader(len(fname))
1940 yield fname
1940 yield fname
1941 lookup = lookuprevlink_func(filerevlog)
1941 lookup = lookuprevlink_func(filerevlog)
1942 for chnk in filerevlog.group(nodeiter, lookup):
1942 for chnk in filerevlog.group(nodeiter, lookup):
1943 yield chnk
1943 yield chnk
1944
1944
1945 yield changegroup.closechunk()
1945 yield changegroup.closechunk()
1946
1946
1947 if nodes:
1947 if nodes:
1948 self.hook('outgoing', node=hex(nodes[0]), source=source)
1948 self.hook('outgoing', node=hex(nodes[0]), source=source)
1949
1949
1950 return util.chunkbuffer(gengroup())
1950 return util.chunkbuffer(gengroup())
1951
1951
1952 def addchangegroup(self, source, srctype, url, emptyok=False):
1952 def addchangegroup(self, source, srctype, url, emptyok=False):
1953 """add changegroup to repo.
1953 """add changegroup to repo.
1954
1954
1955 return values:
1955 return values:
1956 - nothing changed or no source: 0
1956 - nothing changed or no source: 0
1957 - more heads than before: 1+added heads (2..n)
1957 - more heads than before: 1+added heads (2..n)
1958 - fewer heads than before: -1-removed heads (-2..-n)
1958 - fewer heads than before: -1-removed heads (-2..-n)
1959 - number of heads stays the same: 1
1959 - number of heads stays the same: 1
1960 """
1960 """
1961 def csmap(x):
1961 def csmap(x):
1962 self.ui.debug(_("add changeset %s\n") % short(x))
1962 self.ui.debug(_("add changeset %s\n") % short(x))
1963 return cl.count()
1963 return cl.count()
1964
1964
1965 def revmap(x):
1965 def revmap(x):
1966 return cl.rev(x)
1966 return cl.rev(x)
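# Linkrev convention for the addgroup calls below (a sketch): incoming
# changelog chunks each become their own linkrev, so csmap ignores its
# argument and returns cl.count(), the rev about to be assigned; manifest
# and filelog chunks instead carry a changelog node, which revmap
# resolves back to that rev:
#
#     csmap(node)     # -> rev the next incoming changeset will occupy
#     revmap(clnode)  # -> cl.rev(clnode)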
1967
1967
1968 if not source:
1968 if not source:
1969 return 0
1969 return 0
1970
1970
1971 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1971 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1972
1972
1973 changesets = files = revisions = 0
1973 changesets = files = revisions = 0
1974
1974
1975 # write changelog data to temp files so concurrent readers will not see
1975 # write changelog data to temp files so concurrent readers will not see
1976 # an inconsistent view
1976 # an inconsistent view
1977 cl = self.changelog
1977 cl = self.changelog
1978 cl.delayupdate()
1978 cl.delayupdate()
1979 oldheads = len(cl.heads())
1979 oldheads = len(cl.heads())
1980
1980
1981 tr = self.transaction()
1981 tr = self.transaction()
1982 try:
1982 try:
1983 trp = weakref.proxy(tr)
1983 trp = weakref.proxy(tr)
1984 # pull off the changeset group
1984 # pull off the changeset group
1985 self.ui.status(_("adding changesets\n"))
1985 self.ui.status(_("adding changesets\n"))
1986 cor = cl.count() - 1
1986 cor = cl.count() - 1
1987 chunkiter = changegroup.chunkiter(source)
1987 chunkiter = changegroup.chunkiter(source)
1988 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1988 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1989 raise util.Abort(_("received changelog group is empty"))
1989 raise util.Abort(_("received changelog group is empty"))
1990 cnr = cl.count() - 1
1990 cnr = cl.count() - 1
1991 changesets = cnr - cor
1991 changesets = cnr - cor
1992
1992
1993 # pull off the manifest group
1993 # pull off the manifest group
1994 self.ui.status(_("adding manifests\n"))
1994 self.ui.status(_("adding manifests\n"))
1995 chunkiter = changegroup.chunkiter(source)
1995 chunkiter = changegroup.chunkiter(source)
1996 # no need to check for empty manifest group here:
1996 # no need to check for empty manifest group here:
1997 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1997 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1998 # no new manifest will be created and the manifest group will
1998 # no new manifest will be created and the manifest group will
1999 # be empty during the pull
1999 # be empty during the pull
2000 self.manifest.addgroup(chunkiter, revmap, trp)
2000 self.manifest.addgroup(chunkiter, revmap, trp)
2001
2001
2002 # process the files
2002 # process the files
2003 self.ui.status(_("adding file changes\n"))
2003 self.ui.status(_("adding file changes\n"))
2004 while 1:
2004 while 1:
2005 f = changegroup.getchunk(source)
2005 f = changegroup.getchunk(source)
2006 if not f:
2006 if not f:
2007 break
2007 break
2008 self.ui.debug(_("adding %s revisions\n") % f)
2008 self.ui.debug(_("adding %s revisions\n") % f)
2009 fl = self.file(f)
2009 fl = self.file(f)
2010 o = fl.count()
2010 o = fl.count()
2011 chunkiter = changegroup.chunkiter(source)
2011 chunkiter = changegroup.chunkiter(source)
2012 if fl.addgroup(chunkiter, revmap, trp) is None:
2012 if fl.addgroup(chunkiter, revmap, trp) is None:
2013 raise util.Abort(_("received file revlog group is empty"))
2013 raise util.Abort(_("received file revlog group is empty"))
2014 revisions += fl.count() - o
2014 revisions += fl.count() - o
2015 files += 1
2015 files += 1
2016
2016
2017 # make changelog see real files again
2017 # make changelog see real files again
2018 cl.finalize(trp)
2018 cl.finalize(trp)
2019
2019
2020 newheads = len(self.changelog.heads())
2020 newheads = len(self.changelog.heads())
2021 heads = ""
2021 heads = ""
2022 if oldheads and newheads != oldheads:
2022 if oldheads and newheads != oldheads:
2023 heads = _(" (%+d heads)") % (newheads - oldheads)
2023 heads = _(" (%+d heads)") % (newheads - oldheads)
2024
2024
2025 self.ui.status(_("added %d changesets"
2025 self.ui.status(_("added %d changesets"
2026 " with %d changes to %d files%s\n")
2026 " with %d changes to %d files%s\n")
2027 % (changesets, revisions, files, heads))
2027 % (changesets, revisions, files, heads))
2028
2028
2029 if changesets > 0:
2029 if changesets > 0:
2030 self.hook('pretxnchangegroup', throw=True,
2030 self.hook('pretxnchangegroup', throw=True,
2031 node=hex(self.changelog.node(cor+1)), source=srctype,
2031 node=hex(self.changelog.node(cor+1)), source=srctype,
2032 url=url)
2032 url=url)
2033
2033
2034 tr.close()
2034 tr.close()
2035 finally:
2035 finally:
2036 del tr
2036 del tr
2037
2037
2038 if changesets > 0:
2038 if changesets > 0:
2039 # forcefully update the on-disk branch cache
2039 # forcefully update the on-disk branch cache
2040 self.ui.debug(_("updating the branch cache\n"))
2040 self.ui.debug(_("updating the branch cache\n"))
2041 self.branchtags()
2041 self.branchtags()
2042 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2042 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2043 source=srctype, url=url)
2043 source=srctype, url=url)
2044
2044
2045 for i in xrange(cor + 1, cnr + 1):
2045 for i in xrange(cor + 1, cnr + 1):
2046 self.hook("incoming", node=hex(self.changelog.node(i)),
2046 self.hook("incoming", node=hex(self.changelog.node(i)),
2047 source=srctype, url=url)
2047 source=srctype, url=url)
2048
2048
2049 # never return 0 here:
2049 # never return 0 here:
2050 if newheads < oldheads:
2050 if newheads < oldheads:
2051 return newheads - oldheads - 1
2051 return newheads - oldheads - 1
2052 else:
2052 else:
2053 return newheads - oldheads + 1
2053 return newheads - oldheads + 1
2054
2054
2055
2055
2056 def stream_in(self, remote):
2056 def stream_in(self, remote):
2057 fp = remote.stream_out()
2057 fp = remote.stream_out()
2058 l = fp.readline()
2058 l = fp.readline()
2059 try:
2059 try:
2060 resp = int(l)
2060 resp = int(l)
2061 except ValueError:
2061 except ValueError:
2062 raise util.UnexpectedOutput(
2062 raise util.UnexpectedOutput(
2063 _('Unexpected response from remote server:'), l)
2063 _('Unexpected response from remote server:'), l)
2064 if resp == 1:
2064 if resp == 1:
2065 raise util.Abort(_('operation forbidden by server'))
2065 raise util.Abort(_('operation forbidden by server'))
2066 elif resp == 2:
2066 elif resp == 2:
2067 raise util.Abort(_('locking the remote repository failed'))
2067 raise util.Abort(_('locking the remote repository failed'))
2068 elif resp != 0:
2068 elif resp != 0:
2069 raise util.Abort(_('the server sent an unknown error code'))
2069 raise util.Abort(_('the server sent an unknown error code'))
2070 self.ui.status(_('streaming all changes\n'))
2070 self.ui.status(_('streaming all changes\n'))
2071 l = fp.readline()
2071 l = fp.readline()
2072 try:
2072 try:
2073 total_files, total_bytes = map(int, l.split(' ', 1))
2073 total_files, total_bytes = map(int, l.split(' ', 1))
2074 except (ValueError, TypeError):
2074 except (ValueError, TypeError):
2075 raise util.UnexpectedOutput(
2075 raise util.UnexpectedOutput(
2076 _('Unexpected response from remote server:'), l)
2076 _('Unexpected response from remote server:'), l)
2077 self.ui.status(_('%d files to transfer, %s of data\n') %
2077 self.ui.status(_('%d files to transfer, %s of data\n') %
2078 (total_files, util.bytecount(total_bytes)))
2078 (total_files, util.bytecount(total_bytes)))
2079 start = time.time()
2079 start = time.time()
2080 for i in xrange(total_files):
2080 for i in xrange(total_files):
2081 # XXX doesn't support '\n' or '\r' in filenames
2081 # XXX doesn't support '\n' or '\r' in filenames
2082 l = fp.readline()
2082 l = fp.readline()
2083 try:
2083 try:
2084 name, size = l.split('\0', 1)
2084 name, size = l.split('\0', 1)
2085 size = int(size)
2085 size = int(size)
2086 except (ValueError, TypeError):
2086 except (ValueError, TypeError):
2087 raise util.UnexpectedOutput(
2087 raise util.UnexpectedOutput(
2088 _('Unexpected response from remote server:'), l)
2088 _('Unexpected response from remote server:'), l)
2089 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2089 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2090 ofp = self.sopener(name, 'w')
2090 ofp = self.sopener(name, 'w')
2091 for chunk in util.filechunkiter(fp, limit=size):
2091 for chunk in util.filechunkiter(fp, limit=size):
2092 ofp.write(chunk)
2092 ofp.write(chunk)
2093 ofp.close()
2093 ofp.close()
2094 elapsed = time.time() - start
2094 elapsed = time.time() - start
2095 if elapsed <= 0:
2095 if elapsed <= 0:
2096 elapsed = 0.001
2096 elapsed = 0.001
2097 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2097 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2098 (util.bytecount(total_bytes), elapsed,
2098 (util.bytecount(total_bytes), elapsed,
2099 util.bytecount(total_bytes / elapsed)))
2099 util.bytecount(total_bytes / elapsed)))
2100 self.invalidate()
2100 self.invalidate()
2101 return len(self.heads()) + 1
2101 return len(self.heads()) + 1
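# Framing of the stream_out payload parsed above (a sketch reconstructed
# from the reads, not a formal spec):
#
#     <resp>\n                       # 0 ok, 1 forbidden, 2 lock failed
#     <total_files> <total_bytes>\n
#     then, total_files times:
#         <name>\0<size>\n           # store path, byte count
#         <size raw bytes>           # verbatim file contents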
2102
2102
2103 def clone(self, remote, heads=[], stream=False):
2103 def clone(self, remote, heads=[], stream=False):
2104 '''clone remote repository.
2104 '''clone remote repository.
2105
2105
2106 keyword arguments:
2106 keyword arguments:
2107 heads: list of revs to clone (forces use of pull)
2107 heads: list of revs to clone (forces use of pull)
2108 stream: use streaming clone if possible'''
2108 stream: use streaming clone if possible'''
2109
2109
2110 # now, all clients that can request uncompressed clones can
2110 # now, all clients that can request uncompressed clones can
2111 # read repo formats supported by all servers that can serve
2111 # read repo formats supported by all servers that can serve
2112 # them.
2112 # them.
2113
2113
2114 # if revlog format changes, client will have to check version
2114 # if revlog format changes, client will have to check version
2115 # and format flags on "stream" capability, and use
2115 # and format flags on "stream" capability, and use
2116 # uncompressed only if compatible.
2116 # uncompressed only if compatible.
2117
2117
2118 if stream and not heads and remote.capable('stream'):
2118 if stream and not heads and remote.capable('stream'):
2119 return self.stream_in(remote)
2119 return self.stream_in(remote)
2120 return self.pull(remote, heads)
2120 return self.pull(remote, heads)
2121
2121
2122 # used to avoid circular references so destructors work
2122 # used to avoid circular references so destructors work
2123 def aftertrans(files):
2123 def aftertrans(files):
2124 renamefiles = [tuple(t) for t in files]
2124 renamefiles = [tuple(t) for t in files]
2125 def a():
2125 def a():
2126 for src, dest in renamefiles:
2126 for src, dest in renamefiles:
2127 util.rename(src, dest)
2127 util.rename(src, dest)
2128 return a
2128 return a
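# Why a closure: if the transaction held a bound method of the repo, the
# repo would sit in a reference cycle and its destructors might never
# run; a standalone closure over copied tuples avoids that. Hedged usage
# sketch (file names illustrative):
#
#     after = aftertrans([('journal', 'undo')])
#     after()    # runs util.rename('journal', 'undo')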
2129
2129
2130 def instance(ui, path, create):
2130 def instance(ui, path, create):
2131 return localrepository(ui, util.drop_scheme('file', path), create)
2131 return localrepository(ui, util.drop_scheme('file', path), create)
2132
2132
2133 def islocal(path):
2133 def islocal(path):
2134 return True
2134 return True
@@ -1,1332 +1,1330 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import bin, hex, nullid, nullrev, short
13 from node import bin, hex, nullid, nullrev, short
14 from i18n import _
14 from i18n import _
15 import changegroup, errno, ancestor, mdiff
15 import changegroup, errno, ancestor, mdiff
16 import struct, util, zlib
16 import struct, util, zlib
17
17
18 _pack = struct.pack
18 _pack = struct.pack
19 _unpack = struct.unpack
19 _unpack = struct.unpack
20 _compress = zlib.compress
20 _compress = zlib.compress
21 _decompress = zlib.decompress
21 _decompress = zlib.decompress
22 _sha = util.sha1
22 _sha = util.sha1
23
23
24 # revlog flags
24 # revlog flags
25 REVLOGV0 = 0
25 REVLOGV0 = 0
26 REVLOGNG = 1
26 REVLOGNG = 1
27 REVLOGNGINLINEDATA = (1 << 16)
27 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31
31
32 class RevlogError(Exception):
32 class RevlogError(Exception):
33 pass
33 pass
34
34
35 class LookupError(RevlogError):
35 class LookupError(RevlogError):
36 def __init__(self, name, index, message):
36 def __init__(self, name, index, message):
37 self.name = name
37 self.name = name
38 if isinstance(name, str) and len(name) == 20:
38 if isinstance(name, str) and len(name) == 20:
39 name = short(name)
39 name = short(name)
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41
41
42 def getoffset(q):
42 def getoffset(q):
43 return int(q >> 16)
43 return int(q >> 16)
44
44
45 def gettype(q):
45 def gettype(q):
46 return int(q & 0xFFFF)
46 return int(q & 0xFFFF)
47
47
48 def offset_type(offset, type):
48 def offset_type(offset, type):
49 return long(long(offset) << 16 | type)
49 return long(long(offset) << 16 | type)
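# Round-trip sketch for the packed field built by offset_type() above:
#
#     q = offset_type(4096, 1)   # 4096 << 16 | 1
#     getoffset(q)               # -> 4096
#     gettype(q)                 # -> 1
#
# The low 16 bits carry the per-revision flags; the rest is the offset.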
50
50
51 def hash(text, p1, p2):
51 def hash(text, p1, p2):
52 """generate a hash from the given text and its parent hashes
52 """generate a hash from the given text and its parent hashes
53
53
54 This hash combines both the current file contents and its history
54 This hash combines both the current file contents and its history
55 in a manner that makes it easy to distinguish nodes with the same
55 in a manner that makes it easy to distinguish nodes with the same
56 content in the revision graph.
56 content in the revision graph.
57 """
57 """
58 l = [p1, p2]
58 l = [p1, p2]
59 l.sort()
59 l.sort()
60 s = _sha(l[0])
60 s = _sha(l[0])
61 s.update(l[1])
61 s.update(l[1])
62 s.update(text)
62 s.update(text)
63 return s.digest()
63 return s.digest()
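# Worked example for a root revision (both parents null); sorting the
# parents first makes the hash independent of parent order:
#
#     n = hash(text, nullid, nullid)
#     # == sha1(nullid + nullid + text).digest()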
64
64
65 def compress(text):
65 def compress(text):
66 """ generate a possibly-compressed representation of text """
66 """ generate a possibly-compressed representation of text """
67 if not text:
67 if not text:
68 return ("", text)
68 return ("", text)
69 l = len(text)
69 l = len(text)
70 bin = None
70 bin = None
71 if l < 44:
71 if l < 44:
72 pass
72 pass
73 elif l > 1000000:
73 elif l > 1000000:
74 # zlib makes an internal copy, thus doubling memory usage for
74 # zlib makes an internal copy, thus doubling memory usage for
75 # large files, so let's do this in pieces
75 # large files, so let's do this in pieces
76 z = zlib.compressobj()
76 z = zlib.compressobj()
77 p = []
77 p = []
78 pos = 0
78 pos = 0
79 while pos < l:
79 while pos < l:
80 pos2 = pos + 2**20
80 pos2 = pos + 2**20
81 p.append(z.compress(text[pos:pos2]))
81 p.append(z.compress(text[pos:pos2]))
82 pos = pos2
82 pos = pos2
83 p.append(z.flush())
83 p.append(z.flush())
84 if sum(map(len, p)) < l:
84 if sum(map(len, p)) < l:
85 bin = "".join(p)
85 bin = "".join(p)
86 else:
86 else:
87 bin = _compress(text)
87 bin = _compress(text)
88 if bin is None or len(bin) > l:
88 if bin is None or len(bin) > l:
89 if text[0] == '\0':
89 if text[0] == '\0':
90 return ("", text)
90 return ("", text)
91 return ('u', text)
91 return ('u', text)
92 return ("", bin)
92 return ("", bin)
93
93
94 def decompress(bin):
94 def decompress(bin):
95 """ decompress the given input """
95 """ decompress the given input """
96 if not bin:
96 if not bin:
97 return bin
97 return bin
98 t = bin[0]
98 t = bin[0]
99 if t == '\0':
99 if t == '\0':
100 return bin
100 return bin
101 if t == 'x':
101 if t == 'x':
102 return _decompress(bin)
102 return _decompress(bin)
103 if t == 'u':
103 if t == 'u':
104 return bin[1:]
104 return bin[1:]
105 raise RevlogError(_("unknown compression type %r") % t)
105 raise RevlogError(_("unknown compression type %r") % t)
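# The tag convention shared by compress()/decompress() above (a sketch;
# note 'x' is simply the first byte of a zlib stream, not a custom tag):
#
#     decompress('u' + text)   # -> text, stored uncompressed
#     decompress(zlibdata)     # inflated, since zlibdata starts with 'x'
#     decompress('\0' + rest)  # returned verbatim: deltas start with
#                              # '\0', so no extra tag byte is spent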
106
106
107 class lazyparser(object):
107 class lazyparser(object):
108 """
108 """
109 this class avoids the need to parse the entirety of large indices
109 this class avoids the need to parse the entirety of large indices
110 """
110 """
111
111
112 # lazyparser is not safe to use on windows if win32 extensions not
112 # lazyparser is not safe to use on windows if win32 extensions not
113 # available. it keeps the file handle open, which makes it impossible
113 # available. it keeps the file handle open, which makes it impossible
114 # to break hardlinks on local cloned repos.
114 # to break hardlinks on local cloned repos.
115
115
116 def __init__(self, dataf, size):
116 def __init__(self, dataf, size):
117 self.dataf = dataf
117 self.dataf = dataf
118 self.s = struct.calcsize(indexformatng)
118 self.s = struct.calcsize(indexformatng)
119 self.datasize = size
119 self.datasize = size
120 self.l = size/self.s
120 self.l = size/self.s
121 self.index = [None] * self.l
121 self.index = [None] * self.l
122 self.map = {nullid: nullrev}
122 self.map = {nullid: nullrev}
123 self.allmap = 0
123 self.allmap = 0
124 self.all = 0
124 self.all = 0
125 self.mapfind_count = 0
125 self.mapfind_count = 0
126
126
127 def loadmap(self):
127 def loadmap(self):
128 """
128 """
129 during a commit, we need to make sure the rev being added is
129 during a commit, we need to make sure the rev being added is
130 not a duplicate. This requires loading the entire index,
130 not a duplicate. This requires loading the entire index,
131 which is fairly slow. loadmap can load up just the node map,
131 which is fairly slow. loadmap can load up just the node map,
132 which takes much less time.
132 which takes much less time.
133 """
133 """
134 if self.allmap:
134 if self.allmap:
135 return
135 return
136 end = self.datasize
136 end = self.datasize
137 self.allmap = 1
137 self.allmap = 1
138 cur = 0
138 cur = 0
139 count = 0
139 count = 0
140 blocksize = self.s * 256
140 blocksize = self.s * 256
141 self.dataf.seek(0)
141 self.dataf.seek(0)
142 while cur < end:
142 while cur < end:
143 data = self.dataf.read(blocksize)
143 data = self.dataf.read(blocksize)
144 off = 0
144 off = 0
145 for x in xrange(256):
145 for x in xrange(256):
146 n = data[off + ngshaoffset:off + ngshaoffset + 20]
146 n = data[off + ngshaoffset:off + ngshaoffset + 20]
147 self.map[n] = count
147 self.map[n] = count
148 count += 1
148 count += 1
149 if count >= self.l:
149 if count >= self.l:
150 break
150 break
151 off += self.s
151 off += self.s
152 cur += blocksize
152 cur += blocksize
153
153
154 def loadblock(self, blockstart, blocksize, data=None):
154 def loadblock(self, blockstart, blocksize, data=None):
155 if self.all:
155 if self.all:
156 return
156 return
157 if data is None:
157 if data is None:
158 self.dataf.seek(blockstart)
158 self.dataf.seek(blockstart)
159 if blockstart + blocksize > self.datasize:
159 if blockstart + blocksize > self.datasize:
160 # the revlog may have grown since we've started running,
160 # the revlog may have grown since we've started running,
161 # but we don't have space in self.index for more entries.
161 # but we don't have space in self.index for more entries.
162 # limit blocksize so that we don't get too much data.
162 # limit blocksize so that we don't get too much data.
163 blocksize = max(self.datasize - blockstart, 0)
163 blocksize = max(self.datasize - blockstart, 0)
164 data = self.dataf.read(blocksize)
164 data = self.dataf.read(blocksize)
165 lend = len(data) / self.s
165 lend = len(data) / self.s
166 i = blockstart / self.s
166 i = blockstart / self.s
167 off = 0
167 off = 0
168 # lazyindex supports __delitem__
168 # lazyindex supports __delitem__
169 if lend > len(self.index) - i:
169 if lend > len(self.index) - i:
170 lend = len(self.index) - i
170 lend = len(self.index) - i
171 for x in xrange(lend):
171 for x in xrange(lend):
172 if self.index[i + x] == None:
172 if self.index[i + x] == None:
173 b = data[off : off + self.s]
173 b = data[off : off + self.s]
174 self.index[i + x] = b
174 self.index[i + x] = b
175 n = b[ngshaoffset:ngshaoffset + 20]
175 n = b[ngshaoffset:ngshaoffset + 20]
176 self.map[n] = i + x
176 self.map[n] = i + x
177 off += self.s
177 off += self.s
178
178
179 def findnode(self, node):
179 def findnode(self, node):
180 """search backwards through the index file for a specific node"""
180 """search backwards through the index file for a specific node"""
181 if self.allmap:
181 if self.allmap:
182 return None
182 return None
183
183
184 # hg log will cause many many searches for the manifest
184 # hg log will cause many many searches for the manifest
185 # nodes. After we get called a few times, just load the whole
185 # nodes. After we get called a few times, just load the whole
186 # thing.
186 # thing.
187 if self.mapfind_count > 8:
187 if self.mapfind_count > 8:
188 self.loadmap()
188 self.loadmap()
189 if node in self.map:
189 if node in self.map:
190 return node
190 return node
191 return None
191 return None
192 self.mapfind_count += 1
192 self.mapfind_count += 1
193 last = self.l - 1
193 last = self.l - 1
194 while self.index[last] != None:
194 while self.index[last] != None:
195 if last == 0:
195 if last == 0:
196 self.all = 1
196 self.all = 1
197 self.allmap = 1
197 self.allmap = 1
198 return None
198 return None
199 last -= 1
199 last -= 1
200 end = (last + 1) * self.s
200 end = (last + 1) * self.s
201 blocksize = self.s * 256
201 blocksize = self.s * 256
202 while end >= 0:
202 while end >= 0:
203 start = max(end - blocksize, 0)
203 start = max(end - blocksize, 0)
204 self.dataf.seek(start)
204 self.dataf.seek(start)
205 data = self.dataf.read(end - start)
205 data = self.dataf.read(end - start)
206 findend = end - start
206 findend = end - start
207 while True:
207 while True:
208 # we're searching backwards, so we have to make sure
208 # we're searching backwards, so we have to make sure
209 # we don't find a changeset where this node is a parent
209 # we don't find a changeset where this node is a parent
210 off = data.find(node, 0, findend)
210 off = data.find(node, 0, findend)
211 findend = off
211 findend = off
212 if off >= 0:
212 if off >= 0:
213 i = off / self.s
213 i = off / self.s
214 off = i * self.s
214 off = i * self.s
215 n = data[off + ngshaoffset:off + ngshaoffset + 20]
215 n = data[off + ngshaoffset:off + ngshaoffset + 20]
216 if n == node:
216 if n == node:
217 self.map[n] = i + start / self.s
217 self.map[n] = i + start / self.s
218 return node
218 return node
219 else:
219 else:
220 break
220 break
221 end -= blocksize
221 end -= blocksize
222 return None
222 return None
223
223
224 def loadindex(self, i=None, end=None):
224 def loadindex(self, i=None, end=None):
225 if self.all:
225 if self.all:
226 return
226 return
227 all = False
227 all = False
228 if i == None:
228 if i == None:
229 blockstart = 0
229 blockstart = 0
230 blocksize = (65536 / self.s) * self.s
230 blocksize = (65536 / self.s) * self.s
231 end = self.datasize
231 end = self.datasize
232 all = True
232 all = True
233 else:
233 else:
234 if end:
234 if end:
235 blockstart = i * self.s
235 blockstart = i * self.s
236 end = end * self.s
236 end = end * self.s
237 blocksize = end - blockstart
237 blocksize = end - blockstart
238 else:
238 else:
239 blockstart = (i & ~1023) * self.s
239 blockstart = (i & ~1023) * self.s
240 blocksize = self.s * 1024
240 blocksize = self.s * 1024
241 end = blockstart + blocksize
241 end = blockstart + blocksize
242 while blockstart < end:
242 while blockstart < end:
243 self.loadblock(blockstart, blocksize)
243 self.loadblock(blockstart, blocksize)
244 blockstart += blocksize
244 blockstart += blocksize
245 if all:
245 if all:
246 self.all = True
246 self.all = True
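# Loads are chunked: for a single rev i, loadindex rounds down to a
# 1024-entry boundary and pulls a whole aligned block, so neighboring
# lookups hit already-parsed entries:
#
#     i = 5000
#     i & ~1023               # -> 4096, first entry of the loaded block
#     blockstart = 4096 * self.s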
247
247
248 class lazyindex(object):
248 class lazyindex(object):
249 """a lazy version of the index array"""
249 """a lazy version of the index array"""
250 def __init__(self, parser):
250 def __init__(self, parser):
251 self.p = parser
251 self.p = parser
252 def __len__(self):
252 def __len__(self):
253 return len(self.p.index)
253 return len(self.p.index)
254 def load(self, pos):
254 def load(self, pos):
255 if pos < 0:
255 if pos < 0:
256 pos += len(self.p.index)
256 pos += len(self.p.index)
257 self.p.loadindex(pos)
257 self.p.loadindex(pos)
258 return self.p.index[pos]
258 return self.p.index[pos]
259 def __getitem__(self, pos):
259 def __getitem__(self, pos):
260 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
260 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
261 def __setitem__(self, pos, item):
261 def __setitem__(self, pos, item):
262 self.p.index[pos] = _pack(indexformatng, *item)
262 self.p.index[pos] = _pack(indexformatng, *item)
263 def __delitem__(self, pos):
263 def __delitem__(self, pos):
264 del self.p.index[pos]
264 del self.p.index[pos]
265 def insert(self, pos, e):
265 def insert(self, pos, e):
266 self.p.index.insert(pos, _pack(indexformatng, *e))
266 self.p.index.insert(pos, _pack(indexformatng, *e))
267 def append(self, e):
267 def append(self, e):
268 self.p.index.append(_pack(indexformatng, *e))
268 self.p.index.append(_pack(indexformatng, *e))
269
269
270 class lazymap(object):
270 class lazymap(object):
271 """a lazy version of the node map"""
271 """a lazy version of the node map"""
272 def __init__(self, parser):
272 def __init__(self, parser):
273 self.p = parser
273 self.p = parser
274 def load(self, key):
274 def load(self, key):
275 n = self.p.findnode(key)
275 n = self.p.findnode(key)
276 if n == None:
276 if n == None:
277 raise KeyError(key)
277 raise KeyError(key)
278 def __contains__(self, key):
278 def __contains__(self, key):
279 if key in self.p.map:
279 if key in self.p.map:
280 return True
280 return True
281 self.p.loadmap()
281 self.p.loadmap()
282 return key in self.p.map
282 return key in self.p.map
283 def __iter__(self):
283 def __iter__(self):
284 yield nullid
284 yield nullid
285 for i in xrange(self.p.l):
285 for i in xrange(self.p.l):
286 ret = self.p.index[i]
286 ret = self.p.index[i]
287 if not ret:
287 if not ret:
288 self.p.loadindex(i)
288 self.p.loadindex(i)
289 ret = self.p.index[i]
289 ret = self.p.index[i]
290 if isinstance(ret, str):
290 if isinstance(ret, str):
291 ret = _unpack(indexformatng, ret)
291 ret = _unpack(indexformatng, ret)
292 yield ret[7]
292 yield ret[7]
293 def __getitem__(self, key):
293 def __getitem__(self, key):
294 try:
294 try:
295 return self.p.map[key]
295 return self.p.map[key]
296 except KeyError:
296 except KeyError:
297 try:
297 try:
298 self.load(key)
298 self.load(key)
299 return self.p.map[key]
299 return self.p.map[key]
300 except KeyError:
300 except KeyError:
301 raise KeyError("node " + hex(key))
301 raise KeyError("node " + hex(key))
302 def __setitem__(self, key, val):
302 def __setitem__(self, key, val):
303 self.p.map[key] = val
303 self.p.map[key] = val
304 def __delitem__(self, key):
304 def __delitem__(self, key):
305 del self.p.map[key]
305 del self.p.map[key]
306
306
307 indexformatv0 = ">4l20s20s20s"
307 indexformatv0 = ">4l20s20s20s"
308 v0shaoffset = 56
308 v0shaoffset = 56
309
309
310 class revlogoldio(object):
310 class revlogoldio(object):
311 def __init__(self):
311 def __init__(self):
312 self.size = struct.calcsize(indexformatv0)
312 self.size = struct.calcsize(indexformatv0)
313
313
314 def parseindex(self, fp, inline):
314 def parseindex(self, fp, inline):
315 s = self.size
315 s = self.size
316 index = []
316 index = []
317 nodemap = {nullid: nullrev}
317 nodemap = {nullid: nullrev}
318 n = off = 0
318 n = off = 0
319 data = fp.read()
319 data = fp.read()
320 l = len(data)
320 l = len(data)
321 while off + s <= l:
321 while off + s <= l:
322 cur = data[off:off + s]
322 cur = data[off:off + s]
323 off += s
323 off += s
324 e = _unpack(indexformatv0, cur)
324 e = _unpack(indexformatv0, cur)
325 # transform to revlogv1 format
325 # transform to revlogv1 format
326 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
326 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
327 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
327 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
328 index.append(e2)
328 index.append(e2)
329 nodemap[e[6]] = n
329 nodemap[e[6]] = n
330 n += 1
330 n += 1
331
331
332 return index, nodemap, None
332 return index, nodemap, None
333
333
334 def packentry(self, entry, node, version, rev):
334 def packentry(self, entry, node, version, rev):
335 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
335 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
336 node(entry[5]), node(entry[6]), entry[7])
336 node(entry[5]), node(entry[6]), entry[7])
337 return _pack(indexformatv0, *e2)
337 return _pack(indexformatv0, *e2)
338
338
339 # index ng:
339 # index ng:
340 # 6 bytes offset
340 # 6 bytes offset
341 # 2 bytes flags
341 # 2 bytes flags
342 # 4 bytes compressed length
342 # 4 bytes compressed length
343 # 4 bytes uncompressed length
343 # 4 bytes uncompressed length
344 # 4 bytes: base rev
344 # 4 bytes: base rev
345 # 4 bytes link rev
345 # 4 bytes link rev
346 # 4 bytes parent 1 rev
346 # 4 bytes parent 1 rev
347 # 4 bytes parent 2 rev
347 # 4 bytes parent 2 rev
348 # 32 bytes: nodeid
348 # 32 bytes: nodeid
349 indexformatng = ">Qiiiiii20s12x"
349 indexformatng = ">Qiiiiii20s12x"
350 ngshaoffset = 32
350 ngshaoffset = 32
351 versionformat = ">I"
351 versionformat = ">I"
352
352
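For illustration, a sketch of decoding one 64-byte RevlogNG record with the format above. The function and field names here are illustrative, not part of revlog's API; note that for revision 0 the first four bytes on disk are overlaid by the version header (see packentry below):

    import struct

    indexformatng = ">Qiiiiii20s12x"   # one 64-byte RevlogNG index record
    assert struct.calcsize(indexformatng) == 64

    def decode_entry(record):
        # split one packed record into its logical fields; the first
        # 8 bytes hold offset and flags together, which is why start()
        # below computes index[rev][0] >> 16
        offset_flags, clen, ulen, base, link, p1, p2, node = \
            struct.unpack(indexformatng, record)
        return {
            "offset": offset_flags >> 16,    # 6-byte data offset
            "flags": offset_flags & 0xFFFF,  # 2-byte flags
            "compressed": clen, "uncompressed": ulen,
            "base": base, "link": link, "p1": p1, "p2": p2,
            "node": node,                    # 20-byte SHA-1 nodeid
        }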
353 class revlogio(object):
353 class revlogio(object):
354 def __init__(self):
354 def __init__(self):
355 self.size = struct.calcsize(indexformatng)
355 self.size = struct.calcsize(indexformatng)
356
356
357 def parseindex(self, fp, inline):
357 def parseindex(self, fp, inline):
358 try:
358 try:
359 size = util.fstat(fp).st_size
359 size = util.fstat(fp).st_size
360 except AttributeError:
360 except AttributeError:
361 size = 0
361 size = 0
362
362
363 if util.openhardlinks() and not inline and size > 1000000:
363 if util.openhardlinks() and not inline and size > 1000000:
364 # big index, let's parse it on demand
364 # big index, let's parse it on demand
365 parser = lazyparser(fp, size)
365 parser = lazyparser(fp, size)
366 index = lazyindex(parser)
366 index = lazyindex(parser)
367 nodemap = lazymap(parser)
367 nodemap = lazymap(parser)
368 e = list(index[0])
368 e = list(index[0])
369 type = gettype(e[0])
369 type = gettype(e[0])
370 e[0] = offset_type(0, type)
370 e[0] = offset_type(0, type)
371 index[0] = e
371 index[0] = e
372 return index, nodemap, None
372 return index, nodemap, None
373
373
374 s = self.size
374 s = self.size
375 cache = None
375 cache = None
376 index = []
376 index = []
377 nodemap = {nullid: nullrev}
377 nodemap = {nullid: nullrev}
378 n = off = 0
378 n = off = 0
379 # if we're not using lazymap, always read the whole index
379 # if we're not using lazymap, always read the whole index
380 data = fp.read()
380 data = fp.read()
381 l = len(data) - s
381 l = len(data) - s
382 append = index.append
382 append = index.append
383 if inline:
383 if inline:
384 cache = (0, data)
384 cache = (0, data)
385 while off <= l:
385 while off <= l:
386 e = _unpack(indexformatng, data[off:off + s])
386 e = _unpack(indexformatng, data[off:off + s])
387 nodemap[e[7]] = n
387 nodemap[e[7]] = n
388 append(e)
388 append(e)
389 n += 1
389 n += 1
390 if e[1] < 0:
390 if e[1] < 0:
391 break
391 break
392 off += e[1] + s
392 off += e[1] + s
393 else:
393 else:
394 while off <= l:
394 while off <= l:
395 e = _unpack(indexformatng, data[off:off + s])
395 e = _unpack(indexformatng, data[off:off + s])
396 nodemap[e[7]] = n
396 nodemap[e[7]] = n
397 append(e)
397 append(e)
398 n += 1
398 n += 1
399 off += s
399 off += s
400
400
401 e = list(index[0])
401 e = list(index[0])
402 type = gettype(e[0])
402 type = gettype(e[0])
403 e[0] = offset_type(0, type)
403 e[0] = offset_type(0, type)
404 index[0] = e
404 index[0] = e
405
405
406 return index, nodemap, cache
406 return index, nodemap, cache
407
407
408 def packentry(self, entry, node, version, rev):
408 def packentry(self, entry, node, version, rev):
409 p = _pack(indexformatng, *entry)
409 p = _pack(indexformatng, *entry)
410 if rev == 0:
410 if rev == 0:
411 p = _pack(versionformat, version) + p[4:]
411 p = _pack(versionformat, version) + p[4:]
412 return p
412 return p
413
413
414 class revlog(object):
414 class revlog(object):
415 """
415 """
416 the underlying revision storage object
416 the underlying revision storage object
417
417
418 A revlog consists of two parts, an index and the revision data.
418 A revlog consists of two parts, an index and the revision data.
419
419
420 The index is a file with a fixed record size containing
420 The index is a file with a fixed record size containing
421 information on each revision, including its nodeid (hash), the
421 information on each revision, including its nodeid (hash), the
422 nodeids of its parents, the position and offset of its data within
422 nodeids of its parents, the position and offset of its data within
423 the data file, and the revision it's based on. Finally, each entry
423 the data file, and the revision it's based on. Finally, each entry
424 contains a linkrev entry that can serve as a pointer to external
424 contains a linkrev entry that can serve as a pointer to external
425 data.
425 data.
426
426
427 The revision data itself is a linear collection of data chunks.
427 The revision data itself is a linear collection of data chunks.
428 Each chunk represents a revision and is usually represented as a
428 Each chunk represents a revision and is usually represented as a
429 delta against the previous chunk. To bound lookup time, runs of
429 delta against the previous chunk. To bound lookup time, runs of
430 deltas are limited to about 2 times the length of the original
430 deltas are limited to about 2 times the length of the original
431 version data. This makes retrieval of a version proportional to
431 version data. This makes retrieval of a version proportional to
432 its size, or O(1) relative to the number of revisions.
432 its size, or O(1) relative to the number of revisions.
433
433
434 Both pieces of the revlog are written to in an append-only
434 Both pieces of the revlog are written to in an append-only
435 fashion, which means we never need to rewrite a file to insert or
435 fashion, which means we never need to rewrite a file to insert or
436 remove data, and can use some simple techniques to avoid the need
436 remove data, and can use some simple techniques to avoid the need
437 for locking while reading.
437 for locking while reading.
438 """
438 """
439 def __init__(self, opener, indexfile):
439 def __init__(self, opener, indexfile):
440 """
440 """
441 create a revlog object
441 create a revlog object
442
442
443 opener is a function that abstracts the file opening operation
443 opener is a function that abstracts the file opening operation
444 and can be used to implement COW semantics or the like.
444 and can be used to implement COW semantics or the like.
445 """
445 """
446 self.indexfile = indexfile
446 self.indexfile = indexfile
447 self.datafile = indexfile[:-2] + ".d"
447 self.datafile = indexfile[:-2] + ".d"
448 self.opener = opener
448 self.opener = opener
449 self._cache = None
449 self._cache = None
450 self._chunkcache = None
450 self._chunkcache = None
451 self.nodemap = {nullid: nullrev}
451 self.nodemap = {nullid: nullrev}
452 self.index = []
452 self.index = []
453
453
454 v = REVLOG_DEFAULT_VERSION
454 v = REVLOG_DEFAULT_VERSION
455 if hasattr(opener, "defversion"):
455 if hasattr(opener, "defversion"):
456 v = opener.defversion
456 v = opener.defversion
457 if v & REVLOGNG:
457 if v & REVLOGNG:
458 v |= REVLOGNGINLINEDATA
458 v |= REVLOGNGINLINEDATA
459
459
460 i = ""
460 i = ""
461 try:
461 try:
462 f = self.opener(self.indexfile)
462 f = self.opener(self.indexfile)
463 i = f.read(4)
463 i = f.read(4)
464 f.seek(0)
464 f.seek(0)
465 if len(i) > 0:
465 if len(i) > 0:
466 v = struct.unpack(versionformat, i)[0]
466 v = struct.unpack(versionformat, i)[0]
467 except IOError, inst:
467 except IOError, inst:
468 if inst.errno != errno.ENOENT:
468 if inst.errno != errno.ENOENT:
469 raise
469 raise
470
470
471 self.version = v
471 self.version = v
472 self._inline = v & REVLOGNGINLINEDATA
472 self._inline = v & REVLOGNGINLINEDATA
473 flags = v & ~0xFFFF
473 flags = v & ~0xFFFF
474 fmt = v & 0xFFFF
474 fmt = v & 0xFFFF
475 if fmt == REVLOGV0 and flags:
475 if fmt == REVLOGV0 and flags:
476 raise RevlogError(_("index %s unknown flags %#04x for format v0")
476 raise RevlogError(_("index %s unknown flags %#04x for format v0")
477 % (self.indexfile, flags >> 16))
477 % (self.indexfile, flags >> 16))
478 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
478 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
479 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
479 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
480 % (self.indexfile, flags >> 16))
480 % (self.indexfile, flags >> 16))
481 elif fmt > REVLOGNG:
481 elif fmt > REVLOGNG:
482 raise RevlogError(_("index %s unknown format %d")
482 raise RevlogError(_("index %s unknown format %d")
483 % (self.indexfile, fmt))
483 % (self.indexfile, fmt))
484
484
485 self._io = revlogio()
485 self._io = revlogio()
486 if self.version == REVLOGV0:
486 if self.version == REVLOGV0:
487 self._io = revlogoldio()
487 self._io = revlogoldio()
488 if i:
488 if i:
489 d = self._io.parseindex(f, self._inline)
489 d = self._io.parseindex(f, self._inline)
490 self.index, self.nodemap, self._chunkcache = d
490 self.index, self.nodemap, self._chunkcache = d
491
491
492 # add the magic null revision at -1
492 # add the magic null revision at -1
493 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
493 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
494
494
495 def _loadindex(self, start, end):
495 def _loadindex(self, start, end):
496 """load a block of indexes all at once from the lazy parser"""
496 """load a block of indexes all at once from the lazy parser"""
497 if isinstance(self.index, lazyindex):
497 if isinstance(self.index, lazyindex):
498 self.index.p.loadindex(start, end)
498 self.index.p.loadindex(start, end)
499
499
500 def _loadindexmap(self):
500 def _loadindexmap(self):
501 """loads both the map and the index from the lazy parser"""
501 """loads both the map and the index from the lazy parser"""
502 if isinstance(self.index, lazyindex):
502 if isinstance(self.index, lazyindex):
503 p = self.index.p
503 p = self.index.p
504 p.loadindex()
504 p.loadindex()
505 self.nodemap = p.map
505 self.nodemap = p.map
506
506
507 def _loadmap(self):
507 def _loadmap(self):
508 """loads the map from the lazy parser"""
508 """loads the map from the lazy parser"""
509 if isinstance(self.nodemap, lazymap):
509 if isinstance(self.nodemap, lazymap):
510 self.nodemap.p.loadmap()
510 self.nodemap.p.loadmap()
511 self.nodemap = self.nodemap.p.map
511 self.nodemap = self.nodemap.p.map
512
512
513 def tip(self):
513 def tip(self):
514 return self.node(len(self.index) - 2)
514 return self.node(len(self.index) - 2)
515 def count(self):
515 def count(self):
516 return len(self.index) - 1
516 return len(self.index) - 1
517
517
518 def rev(self, node):
518 def rev(self, node):
519 try:
519 try:
520 return self.nodemap[node]
520 return self.nodemap[node]
521 except KeyError:
521 except KeyError:
522 raise LookupError(node, self.indexfile, _('no node'))
522 raise LookupError(node, self.indexfile, _('no node'))
523 def node(self, rev):
523 def node(self, rev):
524 return self.index[rev][7]
524 return self.index[rev][7]
525 def linkrev(self, node):
525 def linkrev(self, node):
526 return self.index[self.rev(node)][4]
526 return self.index[self.rev(node)][4]
527 def parents(self, node):
527 def parents(self, node):
528 d = self.index[self.rev(node)][5:7]
528 d = self.index[self.rev(node)][5:7]
529 return (self.node(d[0]), self.node(d[1]))
529 return (self.node(d[0]), self.node(d[1]))
530 def parentrevs(self, rev):
530 def parentrevs(self, rev):
531 return self.index[rev][5:7]
531 return self.index[rev][5:7]
532 def start(self, rev):
532 def start(self, rev):
533 return int(self.index[rev][0] >> 16)
533 return int(self.index[rev][0] >> 16)
534 def end(self, rev):
534 def end(self, rev):
535 return self.start(rev) + self.length(rev)
535 return self.start(rev) + self.length(rev)
536 def length(self, rev):
536 def length(self, rev):
537 return self.index[rev][1]
537 return self.index[rev][1]
538 def base(self, rev):
538 def base(self, rev):
539 return self.index[rev][3]
539 return self.index[rev][3]
540
540
541 def size(self, rev):
541 def size(self, rev):
542 """return the length of the uncompressed text for a given revision"""
542 """return the length of the uncompressed text for a given revision"""
543 l = self.index[rev][2]
543 l = self.index[rev][2]
544 if l >= 0:
544 if l >= 0:
545 return l
545 return l
546
546
547 t = self.revision(self.node(rev))
547 t = self.revision(self.node(rev))
548 return len(t)
548 return len(t)
549
549
550 # alternate implementation. The advantage of this code is that it
550 # alternate implementation. The advantage of this code is that it
551 # will be faster for a single revision, but the results are not
551 # will be faster for a single revision, but the results are not
552 # cached, so finding the size of every revision will be slower.
552 # cached, so finding the size of every revision will be slower.
553 """
553 """
554 if self.cache and self.cache[1] == rev:
554 if self.cache and self.cache[1] == rev:
555 return len(self.cache[2])
555 return len(self.cache[2])
556
556
557 base = self.base(rev)
557 base = self.base(rev)
558 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
558 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
559 base = self.cache[1]
559 base = self.cache[1]
560 text = self.cache[2]
560 text = self.cache[2]
561 else:
561 else:
562 text = self.revision(self.node(base))
562 text = self.revision(self.node(base))
563
563
564 l = len(text)
564 l = len(text)
565 for x in xrange(base + 1, rev + 1):
565 for x in xrange(base + 1, rev + 1):
566 l = mdiff.patchedsize(l, self.chunk(x))
566 l = mdiff.patchedsize(l, self.chunk(x))
567 return l
567 return l
568 """
568 """
569
569
570 def reachable(self, node, stop=None):
570 def reachable(self, node, stop=None):
571 """return a hash of all nodes ancestral to a given node, including
571 """return a hash of all nodes ancestral to a given node, including
572 the node itself, stopping when stop is matched"""
572 the node itself, stopping when stop is matched"""
573 reachable = {}
573 reachable = {}
574 visit = [node]
574 visit = [node]
575 reachable[node] = 1
575 reachable[node] = 1
576 if stop:
576 if stop:
577 stopn = self.rev(stop)
577 stopn = self.rev(stop)
578 else:
578 else:
579 stopn = 0
579 stopn = 0
580 while visit:
580 while visit:
581 n = visit.pop(0)
581 n = visit.pop(0)
582 if n == stop:
582 if n == stop:
583 continue
583 continue
584 if n == nullid:
584 if n == nullid:
585 continue
585 continue
586 for p in self.parents(n):
586 for p in self.parents(n):
587 if self.rev(p) < stopn:
587 if self.rev(p) < stopn:
588 continue
588 continue
589 if p not in reachable:
589 if p not in reachable:
590 reachable[p] = 1
590 reachable[p] = 1
591 visit.append(p)
591 visit.append(p)
592 return reachable
592 return reachable
593
593
594 def nodesbetween(self, roots=None, heads=None):
594 def nodesbetween(self, roots=None, heads=None):
595 """Return a tuple containing three elements. Elements 1 and 2 contain
595 """Return a tuple containing three elements. Elements 1 and 2 contain
596 a final list bases and heads after all the unreachable ones have been
596 a final list bases and heads after all the unreachable ones have been
597 pruned. Element 0 contains a topologically sorted list of all
597 pruned. Element 0 contains a topologically sorted list of all
598
598
599 nodes that satisfy these constraints:
599 nodes that satisfy these constraints:
600 1. All nodes must be descended from a node in roots (the nodes on
600 1. All nodes must be descended from a node in roots (the nodes on
601 roots are considered descended from themselves).
601 roots are considered descended from themselves).
602 2. All nodes must also be ancestors of a node in heads (the nodes in
602 2. All nodes must also be ancestors of a node in heads (the nodes in
603 heads are considered to be their own ancestors).
603 heads are considered to be their own ancestors).
604
604
605 If roots is unspecified, nullid is assumed as the only root.
605 If roots is unspecified, nullid is assumed as the only root.
606 If heads is unspecified, it is taken to be the output of the
606 If heads is unspecified, it is taken to be the output of the
607 heads method (i.e. a list of all nodes in the repository that
607 heads method (i.e. a list of all nodes in the repository that
608 have no children)."""
608 have no children)."""
609 nonodes = ([], [], [])
609 nonodes = ([], [], [])
610 if roots is not None:
610 if roots is not None:
611 roots = list(roots)
611 roots = list(roots)
612 if not roots:
612 if not roots:
613 return nonodes
613 return nonodes
614 lowestrev = min([self.rev(n) for n in roots])
614 lowestrev = min([self.rev(n) for n in roots])
615 else:
615 else:
616 roots = [nullid] # Everybody's a descendent of nullid
616 roots = [nullid] # Everybody's a descendent of nullid
617 lowestrev = nullrev
617 lowestrev = nullrev
618 if (lowestrev == nullrev) and (heads is None):
618 if (lowestrev == nullrev) and (heads is None):
619 # We want _all_ the nodes!
619 # We want _all_ the nodes!
620 return ([self.node(r) for r in xrange(0, self.count())],
620 return ([self.node(r) for r in xrange(0, self.count())],
621 [nullid], list(self.heads()))
621 [nullid], list(self.heads()))
622 if heads is None:
622 if heads is None:
623 # All nodes are ancestors, so the latest ancestor is the last
623 # All nodes are ancestors, so the latest ancestor is the last
624 # node.
624 # node.
625 highestrev = self.count() - 1
625 highestrev = self.count() - 1
626 # Set ancestors to None to signal that every node is an ancestor.
626 # Set ancestors to None to signal that every node is an ancestor.
627 ancestors = None
627 ancestors = None
628 # Set heads to an empty dictionary for later discovery of heads
628 # Set heads to an empty dictionary for later discovery of heads
629 heads = {}
629 heads = {}
630 else:
630 else:
631 heads = list(heads)
631 heads = list(heads)
632 if not heads:
632 if not heads:
633 return nonodes
633 return nonodes
634 ancestors = {}
634 ancestors = {}
635 # Turn heads into a dictionary so we can remove 'fake' heads.
635 # Turn heads into a dictionary so we can remove 'fake' heads.
636 # Also, later we will be using it to filter out the heads we can't
636 # Also, later we will be using it to filter out the heads we can't
637 # find from roots.
637 # find from roots.
638 heads = dict.fromkeys(heads, 0)
638 heads = dict.fromkeys(heads, 0)
639 # Start at the top and keep marking parents until we're done.
639 # Start at the top and keep marking parents until we're done.
640 nodestotag = heads.keys()
640 nodestotag = heads.keys()
641 # Remember where the top was so we can use it as a limit later.
641 # Remember where the top was so we can use it as a limit later.
642 highestrev = max([self.rev(n) for n in nodestotag])
642 highestrev = max([self.rev(n) for n in nodestotag])
643 while nodestotag:
643 while nodestotag:
644 # grab a node to tag
644 # grab a node to tag
645 n = nodestotag.pop()
645 n = nodestotag.pop()
646 # Never tag nullid
646 # Never tag nullid
647 if n == nullid:
647 if n == nullid:
648 continue
648 continue
649 # A node's revision number represents its place in a
649 # A node's revision number represents its place in a
650 # topologically sorted list of nodes.
650 # topologically sorted list of nodes.
651 r = self.rev(n)
651 r = self.rev(n)
652 if r >= lowestrev:
652 if r >= lowestrev:
653 if n not in ancestors:
653 if n not in ancestors:
654 # If we are possibly a descendent of one of the roots
654 # If we are possibly a descendent of one of the roots
655 # and we haven't already been marked as an ancestor
655 # and we haven't already been marked as an ancestor
656 ancestors[n] = 1 # Mark as ancestor
656 ancestors[n] = 1 # Mark as ancestor
657 # Add non-nullid parents to list of nodes to tag.
657 # Add non-nullid parents to list of nodes to tag.
658 nodestotag.extend([p for p in self.parents(n) if
658 nodestotag.extend([p for p in self.parents(n) if
659 p != nullid])
659 p != nullid])
660 elif n in heads: # We've seen it before, is it a fake head?
660 elif n in heads: # We've seen it before, is it a fake head?
661 # So it is; real heads should not be the ancestors of
661 # So it is; real heads should not be the ancestors of
662 # any other heads.
662 # any other heads.
663 heads.pop(n)
663 heads.pop(n)
664 if not ancestors:
664 if not ancestors:
665 return nonodes
665 return nonodes
666 # Now that we have our set of ancestors, we want to remove any
666 # Now that we have our set of ancestors, we want to remove any
667 # roots that are not ancestors.
667 # roots that are not ancestors.
668
668
669 # If one of the roots was nullid, everything is included anyway.
669 # If one of the roots was nullid, everything is included anyway.
670 if lowestrev > nullrev:
670 if lowestrev > nullrev:
671 # But, since we weren't, let's recompute the lowest rev to not
671 # But, since we weren't, let's recompute the lowest rev to not
672 # include roots that aren't ancestors.
672 # include roots that aren't ancestors.
673
673
674 # Filter out roots that aren't ancestors of heads
674 # Filter out roots that aren't ancestors of heads
675 roots = [n for n in roots if n in ancestors]
675 roots = [n for n in roots if n in ancestors]
676 # Recompute the lowest revision
676 # Recompute the lowest revision
677 if roots:
677 if roots:
678 lowestrev = min([self.rev(n) for n in roots])
678 lowestrev = min([self.rev(n) for n in roots])
679 else:
679 else:
680 # No more roots? Return empty list
680 # No more roots? Return empty list
681 return nonodes
681 return nonodes
682 else:
682 else:
683 # We are descending from nullid, and don't need to care about
683 # We are descending from nullid, and don't need to care about
684 # any other roots.
684 # any other roots.
685 lowestrev = nullrev
685 lowestrev = nullrev
686 roots = [nullid]
686 roots = [nullid]
687 # Transform our roots list into a 'set' (i.e. a dictionary where the
687 # Transform our roots list into a 'set' (i.e. a dictionary where the
688 # values don't matter).
688 # values don't matter).
689 descendents = dict.fromkeys(roots, 1)
689 descendents = dict.fromkeys(roots, 1)
690 # Also, keep the original roots so we can filter out roots that aren't
690 # Also, keep the original roots so we can filter out roots that aren't
691 # 'real' roots (i.e. are descended from other roots).
691 # 'real' roots (i.e. are descended from other roots).
692 roots = descendents.copy()
692 roots = descendents.copy()
693 # Our topologically sorted list of output nodes.
693 # Our topologically sorted list of output nodes.
694 orderedout = []
694 orderedout = []
695 # Don't start at nullid since we don't want nullid in our output list,
695 # Don't start at nullid since we don't want nullid in our output list,
696 # and if nullid shows up in descendents, empty parents will look like
696 # and if nullid shows up in descendents, empty parents will look like
697 # they're descendents.
697 # they're descendents.
698 for r in xrange(max(lowestrev, 0), highestrev + 1):
698 for r in xrange(max(lowestrev, 0), highestrev + 1):
699 n = self.node(r)
699 n = self.node(r)
700 isdescendent = False
700 isdescendent = False
701 if lowestrev == nullrev: # Everybody is a descendent of nullid
701 if lowestrev == nullrev: # Everybody is a descendent of nullid
702 isdescendent = True
702 isdescendent = True
703 elif n in descendents:
703 elif n in descendents:
704 # n is already a descendent
704 # n is already a descendent
705 isdescendent = True
705 isdescendent = True
706 # This check only needs to be done here because all the roots
706 # This check only needs to be done here because all the roots
707 # will start being marked as descendents before the loop.
707 # will start being marked as descendents before the loop.
708 if n in roots:
708 if n in roots:
709 # If n was a root, check if it's a 'real' root.
709 # If n was a root, check if it's a 'real' root.
710 p = tuple(self.parents(n))
710 p = tuple(self.parents(n))
711 # If any of its parents are descendents, it's not a root.
711 # If any of its parents are descendents, it's not a root.
712 if (p[0] in descendents) or (p[1] in descendents):
712 if (p[0] in descendents) or (p[1] in descendents):
713 roots.pop(n)
713 roots.pop(n)
714 else:
714 else:
715 p = tuple(self.parents(n))
715 p = tuple(self.parents(n))
716 # A node is a descendent if either of its parents is a
716 # A node is a descendent if either of its parents is a
717 # descendent. (We seeded the descendents list with the roots
717 # descendent. (We seeded the descendents list with the roots
718 # up there, remember?)
718 # up there, remember?)
719 if (p[0] in descendents) or (p[1] in descendents):
719 if (p[0] in descendents) or (p[1] in descendents):
720 descendents[n] = 1
720 descendents[n] = 1
721 isdescendent = True
721 isdescendent = True
722 if isdescendent and ((ancestors is None) or (n in ancestors)):
722 if isdescendent and ((ancestors is None) or (n in ancestors)):
723 # Only include nodes that are both descendents and ancestors.
723 # Only include nodes that are both descendents and ancestors.
724 orderedout.append(n)
724 orderedout.append(n)
725 if (ancestors is not None) and (n in heads):
725 if (ancestors is not None) and (n in heads):
726 # We're trying to figure out which heads are reachable
726 # We're trying to figure out which heads are reachable
727 # from roots.
727 # from roots.
728 # Mark this head as having been reached
728 # Mark this head as having been reached
729 heads[n] = 1
729 heads[n] = 1
730 elif ancestors is None:
730 elif ancestors is None:
731 # Otherwise, we're trying to discover the heads.
731 # Otherwise, we're trying to discover the heads.
732 # Assume this is a head because if it isn't, the next step
732 # Assume this is a head because if it isn't, the next step
733 # will eventually remove it.
733 # will eventually remove it.
734 heads[n] = 1
734 heads[n] = 1
735 # But, obviously its parents aren't.
735 # But, obviously its parents aren't.
736 for p in self.parents(n):
736 for p in self.parents(n):
737 heads.pop(p, None)
737 heads.pop(p, None)
738 heads = [n for n in heads.iterkeys() if heads[n] != 0]
738 heads = [n for n in heads.iterkeys() if heads[n] != 0]
739 roots = roots.keys()
739 roots = roots.keys()
740 assert orderedout
740 assert orderedout
741 assert roots
741 assert roots
742 assert heads
742 assert heads
743 return (orderedout, roots, heads)
743 return (orderedout, roots, heads)
744
744
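A hedged usage sketch of nodesbetween(); rl is a revlog instance and a, b are binary nodeids assumed to be present in it:

    # every node descended from a and ancestral to b, topologically sorted
    between, bases, tips = rl.nodesbetween(roots=[a], heads=[b])
    # with no arguments, every node in the revlog is returned
    allnodes, bases, tips = rl.nodesbetween()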
745 def heads(self, start=None, stop=None):
745 def heads(self, start=None, stop=None):
746 """return the list of all nodes that have no children
746 """return the list of all nodes that have no children
747
747
748 if start is specified, only heads that are descendants of
748 if start is specified, only heads that are descendants of
749 start will be returned
749 start will be returned
750 if stop is specified, it will consider all the revs from stop
750 if stop is specified, it will consider all the revs from stop
751 as if they had no children
751 as if they had no children
752 """
752 """
753 if start is None and stop is None:
753 if start is None and stop is None:
754 count = self.count()
754 count = self.count()
755 if not count:
755 if not count:
756 return [nullid]
756 return [nullid]
757 ishead = [1] * (count + 1)
757 ishead = [1] * (count + 1)
758 index = self.index
758 index = self.index
759 for r in xrange(count):
759 for r in xrange(count):
760 e = index[r]
760 e = index[r]
761 ishead[e[5]] = ishead[e[6]] = 0
761 ishead[e[5]] = ishead[e[6]] = 0
762 return [self.node(r) for r in xrange(count) if ishead[r]]
762 return [self.node(r) for r in xrange(count) if ishead[r]]
763
763
764 if start is None:
764 if start is None:
765 start = nullid
765 start = nullid
766 if stop is None:
766 if stop is None:
767 stop = []
767 stop = []
768 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
768 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
769 startrev = self.rev(start)
769 startrev = self.rev(start)
770 reachable = {startrev: 1}
770 reachable = {startrev: 1}
771 heads = {startrev: 1}
771 heads = {startrev: 1}
772
772
773 parentrevs = self.parentrevs
773 parentrevs = self.parentrevs
774 for r in xrange(startrev + 1, self.count()):
774 for r in xrange(startrev + 1, self.count()):
775 for p in parentrevs(r):
775 for p in parentrevs(r):
776 if p in reachable:
776 if p in reachable:
777 if r not in stoprevs:
777 if r not in stoprevs:
778 reachable[r] = 1
778 reachable[r] = 1
779 heads[r] = 1
779 heads[r] = 1
780 if p in heads and p not in stoprevs:
780 if p in heads and p not in stoprevs:
781 del heads[p]
781 del heads[p]
782
782
783 return [self.node(r) for r in heads]
783 return [self.node(r) for r in heads]
784
784
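A brief usage sketch; rl, n, and m are assumed names for a revlog and two nodeids it contains:

    tips = rl.heads()                 # every childless node
    tips = rl.heads(start=n)          # only heads descended from n
    # nodes listed in stop are treated as if they had no children
    tips = rl.heads(start=n, stop=[m])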
785 def children(self, node):
785 def children(self, node):
786 """find the children of a given node"""
786 """find the children of a given node"""
787 c = []
787 c = []
788 p = self.rev(node)
788 p = self.rev(node)
789 for r in range(p + 1, self.count()):
789 for r in range(p + 1, self.count()):
790 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
790 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
791 if prevs:
791 if prevs:
792 for pr in prevs:
792 for pr in prevs:
793 if pr == p:
793 if pr == p:
794 c.append(self.node(r))
794 c.append(self.node(r))
795 elif p == nullrev:
795 elif p == nullrev:
796 c.append(self.node(r))
796 c.append(self.node(r))
797 return c
797 return c
798
798
799 def _match(self, id):
799 def _match(self, id):
800 if isinstance(id, (long, int)):
800 if isinstance(id, (long, int)):
801 # rev
801 # rev
802 return self.node(id)
802 return self.node(id)
803 if len(id) == 20:
803 if len(id) == 20:
804 # possibly a binary node
804 # possibly a binary node
805 # odds of a binary node being all hex in ASCII are 1 in 10**25
805 # odds of a binary node being all hex in ASCII are 1 in 10**25
806 try:
806 try:
807 node = id
807 node = id
808 r = self.rev(node) # quick search the index
808 r = self.rev(node) # quick search the index
809 return node
809 return node
810 except LookupError:
810 except LookupError:
811 pass # may be partial hex id
811 pass # may be partial hex id
812 try:
812 try:
813 # str(rev)
813 # str(rev)
814 rev = int(id)
814 rev = int(id)
815 if str(rev) != id:
815 if str(rev) != id:
816 raise ValueError
816 raise ValueError
817 if rev < 0:
817 if rev < 0:
818 rev = self.count() + rev
818 rev = self.count() + rev
819 if rev < 0 or rev >= self.count():
819 if rev < 0 or rev >= self.count():
820 raise ValueError
820 raise ValueError
821 return self.node(rev)
821 return self.node(rev)
822 except (ValueError, OverflowError):
822 except (ValueError, OverflowError):
823 pass
823 pass
824 if len(id) == 40:
824 if len(id) == 40:
825 try:
825 try:
826 # a full hex nodeid?
826 # a full hex nodeid?
827 node = bin(id)
827 node = bin(id)
828 r = self.rev(node)
828 r = self.rev(node)
829 return node
829 return node
830 except TypeError:
830 except TypeError:
831 pass
831 pass
832
832
833 def _partialmatch(self, id):
833 def _partialmatch(self, id):
834 if len(id) < 40:
834 if len(id) < 40:
835 try:
835 try:
836 # hex(node)[:...]
836 # hex(node)[:...]
837 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
837 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
838 node = None
838 node = None
839 for n in self.nodemap:
839 for n in self.nodemap:
840 if n.startswith(bin_id) and hex(n).startswith(id):
840 if n.startswith(bin_id) and hex(n).startswith(id):
841 if node is not None:
841 if node is not None:
842 raise LookupError(id, self.indexfile,
842 raise LookupError(id, self.indexfile,
843 _('ambiguous identifier'))
843 _('ambiguous identifier'))
844 node = n
844 node = n
845 if node is not None:
845 if node is not None:
846 return node
846 return node
847 except TypeError:
847 except TypeError:
848 pass
848 pass
849
849
850 def lookup(self, id):
850 def lookup(self, id):
851 """locate a node based on:
851 """locate a node based on:
852 - revision number or str(revision number)
852 - revision number or str(revision number)
853 - nodeid or subset of hex nodeid
853 - nodeid or subset of hex nodeid
854 """
854 """
855 n = self._match(id)
855 n = self._match(id)
856 if n is not None:
856 if n is not None:
857 return n
857 return n
858 n = self._partialmatch(id)
858 n = self._partialmatch(id)
859 if n:
859 if n:
860 return n
860 return n
861
861
862 raise LookupError(id, self.indexfile, _('no match found'))
862 raise LookupError(id, self.indexfile, _('no match found'))
863
863
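A usage sketch, assuming rl is a revlog whose revision 0 has node n; all four calls resolve to the same 20-byte binary nodeid:

    n = rl.lookup(0)            # revision number
    n = rl.lookup("0")          # str(revision number)
    n = rl.lookup(hex(n))       # full 40-digit hex nodeid
    n = rl.lookup(hex(n)[:8])   # unambiguous hex prefix
    # hex() here is mercurial.node.hex, imported by this module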
864 def cmp(self, node, text):
864 def cmp(self, node, text):
865 """compare text with a given file revision"""
865 """compare text with a given file revision"""
866 p1, p2 = self.parents(node)
866 p1, p2 = self.parents(node)
867 return hash(text, p1, p2) != node
867 return hash(text, p1, p2) != node
868
868
869 def chunk(self, rev, df=None):
869 def chunk(self, rev, df=None):
870 def loadcache(df):
870 def loadcache(df):
871 if not df:
871 if not df:
872 if self._inline:
872 if self._inline:
873 df = self.opener(self.indexfile)
873 df = self.opener(self.indexfile)
874 else:
874 else:
875 df = self.opener(self.datafile)
875 df = self.opener(self.datafile)
876 df.seek(start)
876 df.seek(start)
877 self._chunkcache = (start, df.read(cache_length))
877 self._chunkcache = (start, df.read(cache_length))
878
878
879 start, length = self.start(rev), self.length(rev)
879 start, length = self.start(rev), self.length(rev)
880 if self._inline:
880 if self._inline:
881 start += (rev + 1) * self._io.size
881 start += (rev + 1) * self._io.size
882 end = start + length
882 end = start + length
883
883
884 offset = 0
884 offset = 0
885 if not self._chunkcache:
885 if not self._chunkcache:
886 cache_length = max(65536, length)
886 cache_length = max(65536, length)
887 loadcache(df)
887 loadcache(df)
888 else:
888 else:
889 cache_start = self._chunkcache[0]
889 cache_start = self._chunkcache[0]
890 cache_length = len(self._chunkcache[1])
890 cache_length = len(self._chunkcache[1])
891 cache_end = cache_start + cache_length
891 cache_end = cache_start + cache_length
892 if start >= cache_start and end <= cache_end:
892 if start >= cache_start and end <= cache_end:
893 # it is cached
893 # it is cached
894 offset = start - cache_start
894 offset = start - cache_start
895 else:
895 else:
896 cache_length = max(65536, length)
896 cache_length = max(65536, length)
897 loadcache(df)
897 loadcache(df)
898
898
899 # avoid copying large chunks
899 # avoid copying large chunks
900 c = self._chunkcache[1]
900 c = self._chunkcache[1]
901 if cache_length != length:
901 if cache_length != length:
902 c = c[offset:offset + length]
902 c = c[offset:offset + length]
903
903
904 return decompress(c)
904 return decompress(c)
905
905
906 def delta(self, node):
906 def delta(self, node):
907 """return or calculate a delta between a node and its predecessor"""
907 """return or calculate a delta between a node and its predecessor"""
908 r = self.rev(node)
908 r = self.rev(node)
909 return self.revdiff(r - 1, r)
909 return self.revdiff(r - 1, r)
910
910
911 def revdiff(self, rev1, rev2):
911 def revdiff(self, rev1, rev2):
912 """return or calculate a delta between two revisions"""
912 """return or calculate a delta between two revisions"""
913 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
913 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
914 return self.chunk(rev2)
914 return self.chunk(rev2)
915
915
916 return mdiff.textdiff(self.revision(self.node(rev1)),
916 return mdiff.textdiff(self.revision(self.node(rev1)),
917 self.revision(self.node(rev2)))
917 self.revision(self.node(rev2)))
918
918
919 def revision(self, node):
919 def revision(self, node):
920 """return an uncompressed revision of a given"""
920 """return an uncompressed revision of a given"""
921 if node == nullid:
921 if node == nullid:
922 return ""
922 return ""
923 if self._cache and self._cache[0] == node:
923 if self._cache and self._cache[0] == node:
924 return str(self._cache[2])
924 return str(self._cache[2])
925
925
926 # look up what we need to read
926 # look up what we need to read
927 text = None
927 text = None
928 rev = self.rev(node)
928 rev = self.rev(node)
929 base = self.base(rev)
929 base = self.base(rev)
930
930
931 # check rev flags
931 # check rev flags
932 if self.index[rev][0] & 0xFFFF:
932 if self.index[rev][0] & 0xFFFF:
933 raise RevlogError(_('incompatible revision flag %x') %
933 raise RevlogError(_('incompatible revision flag %x') %
934 (self.index[rev][0] & 0xFFFF))
934 (self.index[rev][0] & 0xFFFF))
935
935
936 df = None
936 df = None
937
937
938 # do we have useful data cached?
938 # do we have useful data cached?
939 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
939 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
940 base = self._cache[1]
940 base = self._cache[1]
941 text = str(self._cache[2])
941 text = str(self._cache[2])
942 self._loadindex(base, rev + 1)
942 self._loadindex(base, rev + 1)
943 if not self._inline and rev > base + 1:
943 if not self._inline and rev > base + 1:
944 df = self.opener(self.datafile)
944 df = self.opener(self.datafile)
945 else:
945 else:
946 self._loadindex(base, rev + 1)
946 self._loadindex(base, rev + 1)
947 if not self._inline and rev > base:
947 if not self._inline and rev > base:
948 df = self.opener(self.datafile)
948 df = self.opener(self.datafile)
949 text = self.chunk(base, df=df)
949 text = self.chunk(base, df=df)
950
950
951 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
951 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
952 text = mdiff.patches(text, bins)
952 text = mdiff.patches(text, bins)
953 p1, p2 = self.parents(node)
953 p1, p2 = self.parents(node)
954 if node != hash(text, p1, p2):
954 if node != hash(text, p1, p2):
955 raise RevlogError(_("integrity check failed on %s:%d")
955 raise RevlogError(_("integrity check failed on %s:%d")
956 % (self.datafile, rev))
956 % (self.datafile, rev))
957
957
958 self._cache = (node, rev, text)
958 self._cache = (node, rev, text)
959 return text
959 return text
960
960
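As a self-contained reference, the nodeid that the integrity check above recomputes is the SHA-1 of the two parent nodeids in sorted order followed by the text. This mirrors the module's hash() helper, shown here with hashlib for clarity:

    import hashlib

    def nodehash(text, p1, p2):
        # sort the parents so the id is independent of parent order
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()   # compare against the stored 20-byte node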
961 def checkinlinesize(self, tr, fp=None):
961 def checkinlinesize(self, tr, fp=None):
962 if not self._inline:
962 if not self._inline:
963 return
963 return
964 if not fp:
964 if not fp:
965 fp = self.opener(self.indexfile, 'r')
965 fp = self.opener(self.indexfile, 'r')
966 fp.seek(0, 2)
966 fp.seek(0, 2)
967 size = fp.tell()
967 size = fp.tell()
968 if size < 131072:
968 if size < 131072:
969 return
969 return
970 trinfo = tr.find(self.indexfile)
970 trinfo = tr.find(self.indexfile)
971 if trinfo is None:
971 if trinfo is None:
972 raise RevlogError(_("%s not found in the transaction")
972 raise RevlogError(_("%s not found in the transaction")
973 % self.indexfile)
973 % self.indexfile)
974
974
975 trindex = trinfo[2]
975 trindex = trinfo[2]
976 dataoff = self.start(trindex)
976 dataoff = self.start(trindex)
977
977
978 tr.add(self.datafile, dataoff)
978 tr.add(self.datafile, dataoff)
979 df = self.opener(self.datafile, 'w')
979 df = self.opener(self.datafile, 'w')
980 try:
980 try:
981 calc = self._io.size
981 calc = self._io.size
982 for r in xrange(self.count()):
982 for r in xrange(self.count()):
983 start = self.start(r) + (r + 1) * calc
983 start = self.start(r) + (r + 1) * calc
984 length = self.length(r)
984 length = self.length(r)
985 fp.seek(start)
985 fp.seek(start)
986 d = fp.read(length)
986 d = fp.read(length)
987 df.write(d)
987 df.write(d)
988 finally:
988 finally:
989 df.close()
989 df.close()
990
990
991 fp.close()
991 fp.close()
992 fp = self.opener(self.indexfile, 'w', atomictemp=True)
992 fp = self.opener(self.indexfile, 'w', atomictemp=True)
993 self.version &= ~(REVLOGNGINLINEDATA)
993 self.version &= ~(REVLOGNGINLINEDATA)
994 self._inline = False
994 self._inline = False
995 for i in xrange(self.count()):
995 for i in xrange(self.count()):
996 e = self._io.packentry(self.index[i], self.node, self.version, i)
996 e = self._io.packentry(self.index[i], self.node, self.version, i)
997 fp.write(e)
997 fp.write(e)
998
998
999 # if we don't call rename, the temp file will never replace the
999 # if we don't call rename, the temp file will never replace the
1000 # real index
1000 # real index
1001 fp.rename()
1001 fp.rename()
1002
1002
1003 tr.replace(self.indexfile, trindex * calc)
1003 tr.replace(self.indexfile, trindex * calc)
1004 self._chunkcache = None
1004 self._chunkcache = None
1005
1005
1006 def addrevision(self, text, transaction, link, p1, p2, d=None):
1006 def addrevision(self, text, transaction, link, p1, p2, d=None):
1007 """add a revision to the log
1007 """add a revision to the log
1008
1008
1009 text - the revision data to add
1009 text - the revision data to add
1010 transaction - the transaction object used for rollback
1010 transaction - the transaction object used for rollback
1011 link - the linkrev data to add
1011 link - the linkrev data to add
1012 p1, p2 - the parent nodeids of the revision
1012 p1, p2 - the parent nodeids of the revision
1013 d - an optional precomputed delta
1013 d - an optional precomputed delta
1014 """
1014 """
1015 dfh = None
1015 dfh = None
1016 if not self._inline:
1016 if not self._inline:
1017 dfh = self.opener(self.datafile, "a")
1017 dfh = self.opener(self.datafile, "a")
1018 ifh = self.opener(self.indexfile, "a+")
1018 ifh = self.opener(self.indexfile, "a+")
1019 try:
1019 try:
1020 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1020 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1021 finally:
1021 finally:
1022 if dfh:
1022 if dfh:
1023 dfh.close()
1023 dfh.close()
1024 ifh.close()
1024 ifh.close()
1025
1025
1026 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1026 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1027 node = hash(text, p1, p2)
1027 node = hash(text, p1, p2)
1028 if node in self.nodemap:
1028 if node in self.nodemap:
1029 return node
1029 return node
1030
1030
1031 curr = self.count()
1031 curr = self.count()
1032 prev = curr - 1
1032 prev = curr - 1
1033 base = self.base(prev)
1033 base = self.base(prev)
1034 offset = self.end(prev)
1034 offset = self.end(prev)
1035
1035
1036 if curr:
1036 if curr:
1037 if not d:
1037 if not d:
1038 ptext = self.revision(self.node(prev))
1038 ptext = self.revision(self.node(prev))
1039 d = mdiff.textdiff(ptext, text)
1039 d = mdiff.textdiff(ptext, text)
1040 data = compress(d)
1040 data = compress(d)
1041 l = len(data[1]) + len(data[0])
1041 l = len(data[1]) + len(data[0])
1042 dist = l + offset - self.start(base)
1042 dist = l + offset - self.start(base)
1043
1043
1044 # full versions are inserted when the needed deltas
1044 # full versions are inserted when the needed deltas
1045 # become comparable to the uncompressed text
1045 # become comparable to the uncompressed text
1046 if not curr or dist > len(text) * 2:
1046 if not curr or dist > len(text) * 2:
1047 data = compress(text)
1047 data = compress(text)
1048 l = len(data[1]) + len(data[0])
1048 l = len(data[1]) + len(data[0])
1049 base = curr
1049 base = curr
1050
1050
1051 e = (offset_type(offset, 0), l, len(text),
1051 e = (offset_type(offset, 0), l, len(text),
1052 base, link, self.rev(p1), self.rev(p2), node)
1052 base, link, self.rev(p1), self.rev(p2), node)
1053 self.index.insert(-1, e)
1053 self.index.insert(-1, e)
1054 self.nodemap[node] = curr
1054 self.nodemap[node] = curr
1055
1055
1056 entry = self._io.packentry(e, self.node, self.version, curr)
1056 entry = self._io.packentry(e, self.node, self.version, curr)
1057 if not self._inline:
1057 if not self._inline:
1058 transaction.add(self.datafile, offset)
1058 transaction.add(self.datafile, offset)
1059 transaction.add(self.indexfile, curr * len(entry))
1059 transaction.add(self.indexfile, curr * len(entry))
1060 if data[0]:
1060 if data[0]:
1061 dfh.write(data[0])
1061 dfh.write(data[0])
1062 dfh.write(data[1])
1062 dfh.write(data[1])
1063 dfh.flush()
1063 dfh.flush()
1064 ifh.write(entry)
1064 ifh.write(entry)
1065 else:
1065 else:
1066 offset += curr * self._io.size
1066 offset += curr * self._io.size
1067 transaction.add(self.indexfile, offset, curr)
1067 transaction.add(self.indexfile, offset, curr)
1068 ifh.write(entry)
1068 ifh.write(entry)
1069 ifh.write(data[0])
1069 ifh.write(data[0])
1070 ifh.write(data[1])
1070 ifh.write(data[1])
1071 self.checkinlinesize(transaction, ifh)
1071 self.checkinlinesize(transaction, ifh)
1072
1072
1073 self._cache = (node, curr, text)
1073 self._cache = (node, curr, text)
1074 return node
1074 return node
1075
1075
1076 def ancestor(self, a, b):
1076 def ancestor(self, a, b):
1077 """calculate the least common ancestor of nodes a and b"""
1077 """calculate the least common ancestor of nodes a and b"""
1078
1078
1079 def parents(rev):
1079 def parents(rev):
1080 return [p for p in self.parentrevs(rev) if p != nullrev]
1080 return [p for p in self.parentrevs(rev) if p != nullrev]
1081
1081
1082 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1082 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1083 if c is None:
1083 if c is None:
1084 return nullid
1084 return nullid
1085
1085
1086 return self.node(c)
1086 return self.node(c)
1087
1087
1088 def group(self, nodelist, lookup, infocollect=None):
1088 def group(self, nodelist, lookup, infocollect=None):
1089 """calculate a delta group
1089 """calculate a delta group
1090
1090
1091 Given a list of changeset revs, return a set of deltas and
1091 Given a list of changeset revs, return a set of deltas and
1092 metadata corresponding to nodes. The first delta is
1092 metadata corresponding to nodes. The first delta is
1093 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1093 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1094 have this parent as it has all history before these
1094 have this parent as it has all history before these
1095 changesets. The parent is parent[0].
1095 changesets. The parent is parent[0].
1096 """
1096 """
1097 revs = [self.rev(n) for n in nodelist]
1097 revs = [self.rev(n) for n in nodelist]
1098
1098
1099 # if we don't have any revisions touched by these changesets, bail
1099 # if we don't have any revisions touched by these changesets, bail
1100 if not revs:
1100 if not revs:
1101 yield changegroup.closechunk()
1101 yield changegroup.closechunk()
1102 return
1102 return
1103
1103
1104 # add the parent of the first rev
1104 # add the parent of the first rev
1105 p = self.parents(self.node(revs[0]))[0]
1105 p = self.parents(self.node(revs[0]))[0]
1106 revs.insert(0, self.rev(p))
1106 revs.insert(0, self.rev(p))
1107
1107
1108 # build deltas
1108 # build deltas
1109 for d in xrange(0, len(revs) - 1):
1109 for d in xrange(0, len(revs) - 1):
1110 a, b = revs[d], revs[d + 1]
1110 a, b = revs[d], revs[d + 1]
1111 nb = self.node(b)
1111 nb = self.node(b)
1112
1112
1113 if infocollect is not None:
1113 if infocollect is not None:
1114 infocollect(nb)
1114 infocollect(nb)
1115
1115
1116 p = self.parents(nb)
1116 p = self.parents(nb)
1117 meta = nb + p[0] + p[1] + lookup(nb)
1117 meta = nb + p[0] + p[1] + lookup(nb)
1118 if a == -1:
1118 if a == -1:
1119 d = self.revision(nb)
1119 d = self.revision(nb)
1120 meta += mdiff.trivialdiffheader(len(d))
1120 meta += mdiff.trivialdiffheader(len(d))
1121 else:
1121 else:
1122 d = self.revdiff(a, b)
1122 d = self.revdiff(a, b)
1123 yield changegroup.chunkheader(len(meta) + len(d))
1123 yield changegroup.chunkheader(len(meta) + len(d))
1124 yield meta
1124 yield meta
1125 if len(d) > 2**20:
1125 if len(d) > 2**20:
1126 pos = 0
1126 pos = 0
1127 while pos < len(d):
1127 while pos < len(d):
1128 pos2 = pos + 2 ** 18
1128 pos2 = pos + 2 ** 18
1129 yield d[pos:pos2]
1129 yield d[pos:pos2]
1130 pos = pos2
1130 pos = pos2
1131 else:
1131 else:
1132 yield d
1132 yield d
1133
1133
1134 yield changegroup.closechunk()
1134 yield changegroup.closechunk()
1135
1135
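A sketch of how a consumer can re-frame the stream that group() emits, assuming (as changegroup.chunkiter does) that each header written by changegroup.chunkheader is a 4-byte big-endian length that counts its own four bytes:

    import struct

    def readchunks(fh):
        # each chunk: 4-byte length (self-inclusive), then the payload,
        # i.e. 80 bytes of node/p1/p2/cs metadata followed by delta data;
        # a zero length is the closechunk() terminator
        while True:
            l = struct.unpack(">l", fh.read(4))[0]
            if l <= 4:
                break
            yield fh.read(l - 4)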
1136 def addgroup(self, revs, linkmapper, transaction, unique=0):
1136 def addgroup(self, revs, linkmapper, transaction):
1137 """
1137 """
1138 add a delta group
1138 add a delta group
1139
1139
1140 Given a set of deltas, add them to the revision log. The
1140 Given a set of deltas, add them to the revision log. The
1141 first delta is against its parent, which should be in our
1141 first delta is against its parent, which should be in our
1142 log; the rest are against the previous delta.
1142 log; the rest are against the previous delta.
1143 """
1143 """
1144
1144
1145 # track the base of the current delta log
1145 # track the base of the current delta log
1146 r = self.count()
1146 r = self.count()
1147 t = r - 1
1147 t = r - 1
1148 node = None
1148 node = None
1149
1149
1150 base = prev = nullrev
1150 base = prev = nullrev
1151 start = end = textlen = 0
1151 start = end = textlen = 0
1152 if r:
1152 if r:
1153 end = self.end(t)
1153 end = self.end(t)
1154
1154
1155 ifh = self.opener(self.indexfile, "a+")
1155 ifh = self.opener(self.indexfile, "a+")
1156 isize = r * self._io.size
1156 isize = r * self._io.size
1157 if self._inline:
1157 if self._inline:
1158 transaction.add(self.indexfile, end + isize, r)
1158 transaction.add(self.indexfile, end + isize, r)
1159 dfh = None
1159 dfh = None
1160 else:
1160 else:
1161 transaction.add(self.indexfile, isize, r)
1161 transaction.add(self.indexfile, isize, r)
1162 transaction.add(self.datafile, end)
1162 transaction.add(self.datafile, end)
1163 dfh = self.opener(self.datafile, "a")
1163 dfh = self.opener(self.datafile, "a")
1164
1164
1165 try:
1165 try:
1166 # loop through our set of deltas
1166 # loop through our set of deltas
1167 chain = None
1167 chain = None
1168 for chunk in revs:
1168 for chunk in revs:
1169 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1169 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1170 link = linkmapper(cs)
1170 link = linkmapper(cs)
1171 if node in self.nodemap:
1171 if node in self.nodemap:
1172 # this can happen if two branches make the same change
1172 # this can happen if two branches make the same change
1173 # if unique:
1174 # raise RevlogError(_("already have %s") % hex(node[:4]))
1175 chain = node
1173 chain = node
1176 continue
1174 continue
1177 delta = buffer(chunk, 80)
1175 delta = buffer(chunk, 80)
1178 del chunk
1176 del chunk
1179
1177
1180 for p in (p1, p2):
1178 for p in (p1, p2):
1181 if not p in self.nodemap:
1179 if not p in self.nodemap:
1182 raise LookupError(p, self.indexfile, _('unknown parent'))
1180 raise LookupError(p, self.indexfile, _('unknown parent'))
1183
1181
1184 if not chain:
1182 if not chain:
1185 # retrieve the parent revision of the delta chain
1183 # retrieve the parent revision of the delta chain
1186 chain = p1
1184 chain = p1
1187 if not chain in self.nodemap:
1185 if not chain in self.nodemap:
1188 raise LookupError(chain, self.indexfile, _('unknown base'))
1186 raise LookupError(chain, self.indexfile, _('unknown base'))
1189
1187
1190 # full versions are inserted when the needed deltas become
1188 # full versions are inserted when the needed deltas become
1191 # comparable to the uncompressed text or when the previous
1189 # comparable to the uncompressed text or when the previous
1192 # version is not the one we have a delta against. We use
1190 # version is not the one we have a delta against. We use
1193 # the size of the previous full rev as a proxy for the
1191 # the size of the previous full rev as a proxy for the
1194 # current size.
1192 # current size.
1195
1193
1196 if chain == prev:
1194 if chain == prev:
1197 cdelta = compress(delta)
1195 cdelta = compress(delta)
1198 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1196 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1199 textlen = mdiff.patchedsize(textlen, delta)
1197 textlen = mdiff.patchedsize(textlen, delta)
1200
1198
1201 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1199 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1202 # flush our writes here so we can read it in revision
1200 # flush our writes here so we can read it in revision
1203 if dfh:
1201 if dfh:
1204 dfh.flush()
1202 dfh.flush()
1205 ifh.flush()
1203 ifh.flush()
1206 text = self.revision(chain)
1204 text = self.revision(chain)
1207 if len(text) == 0:
1205 if len(text) == 0:
1208 # skip over trivial delta header
1206 # skip over trivial delta header
1209 text = buffer(delta, 12)
1207 text = buffer(delta, 12)
1210 else:
1208 else:
1211 text = mdiff.patches(text, [delta])
1209 text = mdiff.patches(text, [delta])
1212 del delta
1210 del delta
1213 chk = self._addrevision(text, transaction, link, p1, p2, None,
1211 chk = self._addrevision(text, transaction, link, p1, p2, None,
1214 ifh, dfh)
1212 ifh, dfh)
1215 if not dfh and not self._inline:
1213 if not dfh and not self._inline:
1216 # addrevision switched from inline to conventional
1214 # addrevision switched from inline to conventional
1217 # reopen the index
1215 # reopen the index
1218 dfh = self.opener(self.datafile, "a")
1216 dfh = self.opener(self.datafile, "a")
1219 ifh = self.opener(self.indexfile, "a")
1217 ifh = self.opener(self.indexfile, "a")
1220 if chk != node:
1218 if chk != node:
1221 raise RevlogError(_("consistency error adding group"))
1219 raise RevlogError(_("consistency error adding group"))
1222 textlen = len(text)
1220 textlen = len(text)
1223 else:
1221 else:
1224 e = (offset_type(end, 0), cdeltalen, textlen, base,
1222 e = (offset_type(end, 0), cdeltalen, textlen, base,
1225 link, self.rev(p1), self.rev(p2), node)
1223 link, self.rev(p1), self.rev(p2), node)
1226 self.index.insert(-1, e)
1224 self.index.insert(-1, e)
1227 self.nodemap[node] = r
1225 self.nodemap[node] = r
1228 entry = self._io.packentry(e, self.node, self.version, r)
1226 entry = self._io.packentry(e, self.node, self.version, r)
1229 if self._inline:
1227 if self._inline:
1230 ifh.write(entry)
1228 ifh.write(entry)
1231 ifh.write(cdelta[0])
1229 ifh.write(cdelta[0])
1232 ifh.write(cdelta[1])
1230 ifh.write(cdelta[1])
1233 self.checkinlinesize(transaction, ifh)
1231 self.checkinlinesize(transaction, ifh)
1234 if not self._inline:
1232 if not self._inline:
1235 dfh = self.opener(self.datafile, "a")
1233 dfh = self.opener(self.datafile, "a")
1236 ifh = self.opener(self.indexfile, "a")
1234 ifh = self.opener(self.indexfile, "a")
1237 else:
1235 else:
1238 dfh.write(cdelta[0])
1236 dfh.write(cdelta[0])
1239 dfh.write(cdelta[1])
1237 dfh.write(cdelta[1])
1240 ifh.write(entry)
1238 ifh.write(entry)
1241
1239
1242 t, r, chain, prev = r, r + 1, node, node
1240 t, r, chain, prev = r, r + 1, node, node
1243 base = self.base(t)
1241 base = self.base(t)
1244 start = self.start(base)
1242 start = self.start(base)
1245 end = self.end(t)
1243 end = self.end(t)
1246 finally:
1244 finally:
1247 if dfh:
1245 if dfh:
1248 dfh.close()
1246 dfh.close()
1249 ifh.close()
1247 ifh.close()
1250
1248
1251 return node
1249 return node
1252
1250
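A hedged usage sketch of addgroup(); the names fh, cl, and tr are hypothetical stand-ins for an open changegroup stream, the changelog used to map changeset nodes to link revisions, and an open transaction:

    # feed a delta group into this revlog; the linkmapper argument
    # translates each chunk's changeset node into the linkrev to store
    node = rl.addgroup(changegroup.chunkiter(fh), cl.rev, tr)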
1253 def strip(self, minlink):
1251 def strip(self, minlink):
1254 """truncate the revlog on the first revision with a linkrev >= minlink
1252 """truncate the revlog on the first revision with a linkrev >= minlink
1255
1253
1256 This function is called when we're stripping revision minlink and
1254 This function is called when we're stripping revision minlink and
1257 its descendants from the repository.
1255 its descendants from the repository.
1258
1256
1259 We have to remove all revisions with linkrev >= minlink, because
1257 We have to remove all revisions with linkrev >= minlink, because
1260 the equivalent changelog revisions will be renumbered after the
1258 the equivalent changelog revisions will be renumbered after the
1261 strip.
1259 strip.
1262
1260
1263 So we truncate the revlog on the first of these revisions, and
1261 So we truncate the revlog on the first of these revisions, and
1264 trust that the caller has saved the revisions that shouldn't be
1262 trust that the caller has saved the revisions that shouldn't be
1265 removed and that it'll re-add them after this truncation.
1263 removed and that it'll re-add them after this truncation.
1266 """
1264 """
1267 if self.count() == 0:
1265 if self.count() == 0:
1268 return
1266 return
1269
1267
1270 if isinstance(self.index, lazyindex):
1268 if isinstance(self.index, lazyindex):
1271 self._loadindexmap()
1269 self._loadindexmap()
1272
1270
1273 for rev in xrange(0, self.count()):
1271 for rev in xrange(0, self.count()):
1274 if self.index[rev][4] >= minlink:
1272 if self.index[rev][4] >= minlink:
1275 break
1273 break
1276 else:
1274 else:
1277 return
1275 return
1278
1276
1279 # first truncate the files on disk
1277 # first truncate the files on disk
1280 end = self.start(rev)
1278 end = self.start(rev)
1281 if not self._inline:
1279 if not self._inline:
1282 df = self.opener(self.datafile, "a")
1280 df = self.opener(self.datafile, "a")
1283 df.truncate(end)
1281 df.truncate(end)
1284 end = rev * self._io.size
1282 end = rev * self._io.size
1285 else:
1283 else:
1286 end += rev * self._io.size
1284 end += rev * self._io.size
1287
1285
1288 indexf = self.opener(self.indexfile, "a")
1286 indexf = self.opener(self.indexfile, "a")
1289 indexf.truncate(end)
1287 indexf.truncate(end)
1290
1288
1291 # then reset internal state in memory to forget those revisions
1289 # then reset internal state in memory to forget those revisions
1292 self._cache = None
1290 self._cache = None
1293 self._chunkcache = None
1291 self._chunkcache = None
1294 for x in xrange(rev, self.count()):
1292 for x in xrange(rev, self.count()):
1295 del self.nodemap[self.node(x)]
1293 del self.nodemap[self.node(x)]
1296
1294
1297 del self.index[rev:-1]
1295 del self.index[rev:-1]
1298
1296
1299 def checksize(self):
1297 def checksize(self):
1300 expected = 0
1298 expected = 0
1301 if self.count():
1299 if self.count():
1302 expected = max(0, self.end(self.count() - 1))
1300 expected = max(0, self.end(self.count() - 1))
1303
1301
1304 try:
1302 try:
1305 f = self.opener(self.datafile)
1303 f = self.opener(self.datafile)
1306 f.seek(0, 2)
1304 f.seek(0, 2)
1307 actual = f.tell()
1305 actual = f.tell()
1308 dd = actual - expected
1306 dd = actual - expected
1309 except IOError, inst:
1307 except IOError, inst:
1310 if inst.errno != errno.ENOENT:
1308 if inst.errno != errno.ENOENT:
1311 raise
1309 raise
1312 dd = 0
1310 dd = 0
1313
1311
1314 try:
1312 try:
1315 f = self.opener(self.indexfile)
1313 f = self.opener(self.indexfile)
1316 f.seek(0, 2)
1314 f.seek(0, 2)
1317 actual = f.tell()
1315 actual = f.tell()
1318 s = self._io.size
1316 s = self._io.size
1319 i = max(0, actual / s)
1317 i = max(0, actual / s)
1320 di = actual - (i * s)
1318 di = actual - (i * s)
1321 if self._inline:
1319 if self._inline:
1322 databytes = 0
1320 databytes = 0
1323 for r in xrange(self.count()):
1321 for r in xrange(self.count()):
1324 databytes += max(0, self.length(r))
1322 databytes += max(0, self.length(r))
1325 dd = 0
1323 dd = 0
1326 di = actual - self.count() * s - databytes
1324 di = actual - self.count() * s - databytes
1327 except IOError, inst:
1325 except IOError, inst:
1328 if inst.errno != errno.ENOENT:
1326 if inst.errno != errno.ENOENT:
1329 raise
1327 raise
1330 di = 0
1328 di = 0
1331
1329
1332 return (dd, di)
1330 return (dd, di)