revlog: stop exporting node.short
Matt Mackall - r14393:bdf44e63 default
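
This changeset stops mercurial.revlog from re-exporting short on behalf of mercurial.node, so callers must import the helper from its defining module. The first hunk below converts the transplant extension accordingly; the second trims the re-export line in revlog.py itself. Every call site changes the same way; a minimal sketch of the new spelling, assuming a Mercurial 1.9-era tree on sys.path (the 40-digit hash is hypothetical, only its bdf44e63 prefix comes from this page):

    # short() abbreviates a 20-byte binary node to its first 12 hex digits
    from mercurial.node import bin, short

    node = bin('bdf44e63b585ab27a1a3dcbf7f00d27f0db7e662')  # hypothetical full hash
    print short(node)                                       # -> 'bdf44e63b585'

    # before this change the same helper was commonly reached as
    # revlog.short, via the #@UnusedImport re-export removed below

Names such as bin, hex, nullid and nullrev remain re-exported by revlog for now; only short loses the indirection here.
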
@@ -1,631 +1,632 @@
 # Patch transplanting extension for Mercurial
 #
 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''command to transplant changesets from another branch
 
 This extension allows you to transplant patches from another branch.
 
 Transplanted patches are recorded in .hg/transplant/transplants, as a
 map from a changeset hash to its hash in the source repository.
 '''
 
 from mercurial.i18n import _
 import os, tempfile
+from mercurial.node import short
 from mercurial import bundlerepo, hg, merge, match
 from mercurial import patch, revlog, scmutil, util, error, cmdutil
 from mercurial import revset, templatekw
 
 cmdtable = {}
 command = cmdutil.command(cmdtable)
 
 class transplantentry(object):
     def __init__(self, lnode, rnode):
         self.lnode = lnode
         self.rnode = rnode
 
 class transplants(object):
     def __init__(self, path=None, transplantfile=None, opener=None):
         self.path = path
         self.transplantfile = transplantfile
         self.opener = opener
 
         if not opener:
             self.opener = scmutil.opener(self.path)
         self.transplants = {}
         self.dirty = False
         self.read()
 
     def read(self):
         abspath = os.path.join(self.path, self.transplantfile)
         if self.transplantfile and os.path.exists(abspath):
             for line in self.opener.read(self.transplantfile).splitlines():
                 lnode, rnode = map(revlog.bin, line.split(':'))
                 list = self.transplants.setdefault(rnode, [])
                 list.append(transplantentry(lnode, rnode))
 
     def write(self):
         if self.dirty and self.transplantfile:
             if not os.path.isdir(self.path):
                 os.mkdir(self.path)
             fp = self.opener(self.transplantfile, 'w')
             for list in self.transplants.itervalues():
                 for t in list:
                     l, r = map(revlog.hex, (t.lnode, t.rnode))
                     fp.write(l + ':' + r + '\n')
             fp.close()
         self.dirty = False
 
     def get(self, rnode):
         return self.transplants.get(rnode) or []
 
     def set(self, lnode, rnode):
         list = self.transplants.setdefault(rnode, [])
         list.append(transplantentry(lnode, rnode))
         self.dirty = True
 
     def remove(self, transplant):
         list = self.transplants.get(transplant.rnode)
         if list:
             del list[list.index(transplant)]
             self.dirty = True
 
 class transplanter(object):
     def __init__(self, ui, repo):
         self.ui = ui
         self.path = repo.join('transplant')
         self.opener = scmutil.opener(self.path)
         self.transplants = transplants(self.path, 'transplants',
                                        opener=self.opener)
 
     def applied(self, repo, node, parent):
         '''returns True if a node is already an ancestor of parent
         or has already been transplanted'''
         if hasnode(repo, node):
             if node in repo.changelog.reachable(parent, stop=node):
                 return True
         for t in self.transplants.get(node):
             # it might have been stripped
             if not hasnode(repo, t.lnode):
                 self.transplants.remove(t)
                 return False
             if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
                 return True
         return False
 
     def apply(self, repo, source, revmap, merges, opts={}):
         '''apply the revisions in revmap one by one in revision order'''
         revs = sorted(revmap)
         p1, p2 = repo.dirstate.parents()
         pulls = []
         diffopts = patch.diffopts(self.ui, opts)
         diffopts.git = True
 
         lock = wlock = None
         try:
             wlock = repo.wlock()
             lock = repo.lock()
             for rev in revs:
                 node = revmap[rev]
-                revstr = '%s:%s' % (rev, revlog.short(node))
+                revstr = '%s:%s' % (rev, short(node))
 
                 if self.applied(repo, node, p1):
                     self.ui.warn(_('skipping already applied revision %s\n') %
                                  revstr)
                     continue
 
                 parents = source.changelog.parents(node)
                 if not opts.get('filter'):
                     # If the changeset parent is the same as the
                     # wdir's parent, just pull it.
                     if parents[0] == p1:
                         pulls.append(node)
                         p1 = node
                         continue
                     if pulls:
                         if source != repo:
                             repo.pull(source, heads=pulls)
                         merge.update(repo, pulls[-1], False, False, None)
                         p1, p2 = repo.dirstate.parents()
                         pulls = []
 
                 domerge = False
                 if node in merges:
                     # pulling all the merge revs at once would mean we
                     # couldn't transplant after the latest even if
                     # transplants before them fail.
                     domerge = True
                     if not hasnode(repo, node):
                         repo.pull(source, heads=[node])
 
                 if parents[1] != revlog.nullid:
                     self.ui.note(_('skipping merge changeset %s:%s\n')
-                                 % (rev, revlog.short(node)))
+                                 % (rev, short(node)))
                     patchfile = None
                 else:
                     fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
                     fp = os.fdopen(fd, 'w')
                     gen = patch.diff(source, parents[0], node, opts=diffopts)
                     for chunk in gen:
                         fp.write(chunk)
                     fp.close()
 
                 del revmap[rev]
                 if patchfile or domerge:
                     try:
                         n = self.applyone(repo, node,
                                           source.changelog.read(node),
                                           patchfile, merge=domerge,
                                           log=opts.get('log'),
                                           filter=opts.get('filter'))
                         if n and domerge:
                             self.ui.status(_('%s merged at %s\n') % (revstr,
-                                      revlog.short(n)))
+                                      short(n)))
                         elif n:
                             self.ui.status(_('%s transplanted to %s\n')
-                                           % (revlog.short(node),
-                                              revlog.short(n)))
+                                           % (short(node),
+                                              short(n)))
                     finally:
                         if patchfile:
                             os.unlink(patchfile)
             if pulls:
                 repo.pull(source, heads=pulls)
                 merge.update(repo, pulls[-1], False, False, None)
         finally:
             self.saveseries(revmap, merges)
             self.transplants.write()
             lock.release()
             wlock.release()
 
     def filter(self, filter, node, changelog, patchfile):
         '''arbitrarily rewrite changeset before applying it'''
 
         self.ui.status(_('filtering %s\n') % patchfile)
         user, date, msg = (changelog[1], changelog[2], changelog[4])
         fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
         fp = os.fdopen(fd, 'w')
         fp.write("# HG changeset patch\n")
         fp.write("# User %s\n" % user)
         fp.write("# Date %d %d\n" % date)
         fp.write(msg + '\n')
         fp.close()
 
         try:
             util.system('%s %s %s' % (filter, util.shellquote(headerfile),
                                       util.shellquote(patchfile)),
                         environ={'HGUSER': changelog[1],
                                  'HGREVISION': revlog.hex(node),
                                  },
                         onerr=util.Abort, errprefix=_('filter failed'))
             user, date, msg = self.parselog(file(headerfile))[1:4]
         finally:
             os.unlink(headerfile)
 
         return (user, date, msg)
 
     def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
                  filter=None):
         '''apply the patch in patchfile to the repository as a transplant'''
         (manifest, user, (time, timezone), files, message) = cl[:5]
         date = "%d %d" % (time, timezone)
         extra = {'transplant_source': node}
         if filter:
             (user, date, message) = self.filter(filter, node, cl, patchfile)
 
         if log:
             # we don't translate messages inserted into commits
             message += '\n(transplanted from %s)' % revlog.hex(node)
 
-        self.ui.status(_('applying %s\n') % revlog.short(node))
+        self.ui.status(_('applying %s\n') % short(node))
         self.ui.note('%s %s\n%s\n' % (user, date, message))
 
         if not patchfile and not merge:
             raise util.Abort(_('can only omit patchfile if merging'))
         if patchfile:
             try:
                 files = {}
                 patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
                 files = list(files)
                 if not files:
                     self.ui.warn(_('%s: empty changeset') % revlog.hex(node))
                     return None
             except Exception, inst:
                 seriespath = os.path.join(self.path, 'series')
                 if os.path.exists(seriespath):
                     os.unlink(seriespath)
                 p1 = repo.dirstate.p1()
                 p2 = node
                 self.log(user, date, message, p1, p2, merge=merge)
                 self.ui.write(str(inst) + '\n')
                 raise util.Abort(_('fix up the merge and run '
                                    'hg transplant --continue'))
         else:
             files = None
         if merge:
             p1, p2 = repo.dirstate.parents()
             repo.dirstate.setparents(p1, node)
             m = match.always(repo.root, '')
         else:
             m = match.exact(repo.root, '', files)
 
         n = repo.commit(message, user, date, extra=extra, match=m)
         if not n:
             # Crash here to prevent an unclear crash later, in
             # transplants.write(). This can happen if patch.patch()
             # does nothing but claims success or if repo.status() fails
             # to report changes done by patch.patch(). These both
             # appear to be bugs in other parts of Mercurial, but dying
             # here, as soon as we can detect the problem, is preferable
             # to silently dropping changesets on the floor.
             raise RuntimeError('nothing committed after transplant')
         if not merge:
             self.transplants.set(n, node)
 
         return n
 
     def resume(self, repo, source, opts=None):
         '''recover last transaction and apply remaining changesets'''
         if os.path.exists(os.path.join(self.path, 'journal')):
             n, node = self.recover(repo)
-            self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
-                                                           revlog.short(n)))
+            self.ui.status(_('%s transplanted as %s\n') % (short(node),
+                                                           short(n)))
         seriespath = os.path.join(self.path, 'series')
         if not os.path.exists(seriespath):
             self.transplants.write()
             return
         nodes, merges = self.readseries()
         revmap = {}
         for n in nodes:
             revmap[source.changelog.rev(n)] = n
         os.unlink(seriespath)
 
         self.apply(repo, source, revmap, merges, opts)
 
     def recover(self, repo):
         '''commit working directory using journal metadata'''
         node, user, date, message, parents = self.readlog()
         merge = len(parents) == 2
 
         if not user or not date or not message or not parents[0]:
             raise util.Abort(_('transplant log file is corrupt'))
 
         extra = {'transplant_source': node}
         wlock = repo.wlock()
         try:
             p1, p2 = repo.dirstate.parents()
             if p1 != parents[0]:
                 raise util.Abort(
                     _('working dir not at transplant parent %s') %
                     revlog.hex(parents[0]))
             if merge:
                 repo.dirstate.setparents(p1, parents[1])
             n = repo.commit(message, user, date, extra=extra)
             if not n:
                 raise util.Abort(_('commit failed'))
             if not merge:
                 self.transplants.set(n, node)
             self.unlog()
 
             return n, node
         finally:
             wlock.release()
 
     def readseries(self):
         nodes = []
         merges = []
         cur = nodes
         for line in self.opener.read('series').splitlines():
             if line.startswith('# Merges'):
                 cur = merges
                 continue
             cur.append(revlog.bin(line))
 
         return (nodes, merges)
 
     def saveseries(self, revmap, merges):
         if not revmap:
             return
 
         if not os.path.isdir(self.path):
             os.mkdir(self.path)
         series = self.opener('series', 'w')
         for rev in sorted(revmap):
             series.write(revlog.hex(revmap[rev]) + '\n')
         if merges:
             series.write('# Merges\n')
             for m in merges:
                 series.write(revlog.hex(m) + '\n')
         series.close()
 
     def parselog(self, fp):
         parents = []
         message = []
         node = revlog.nullid
         inmsg = False
         user = None
         date = None
         for line in fp.read().splitlines():
             if inmsg:
                 message.append(line)
             elif line.startswith('# User '):
                 user = line[7:]
             elif line.startswith('# Date '):
                 date = line[7:]
             elif line.startswith('# Node ID '):
                 node = revlog.bin(line[10:])
             elif line.startswith('# Parent '):
                 parents.append(revlog.bin(line[9:]))
             elif not line.startswith('# '):
                 inmsg = True
                 message.append(line)
         if None in (user, date):
             raise util.Abort(_("filter corrupted changeset (no user or date)"))
         return (node, user, date, '\n'.join(message), parents)
 
     def log(self, user, date, message, p1, p2, merge=False):
         '''journal changelog metadata for later recover'''
 
         if not os.path.isdir(self.path):
             os.mkdir(self.path)
         fp = self.opener('journal', 'w')
         fp.write('# User %s\n' % user)
         fp.write('# Date %s\n' % date)
         fp.write('# Node ID %s\n' % revlog.hex(p2))
         fp.write('# Parent ' + revlog.hex(p1) + '\n')
         if merge:
             fp.write('# Parent ' + revlog.hex(p2) + '\n')
         fp.write(message.rstrip() + '\n')
         fp.close()
 
     def readlog(self):
         return self.parselog(self.opener('journal'))
 
     def unlog(self):
         '''remove changelog journal'''
         absdst = os.path.join(self.path, 'journal')
         if os.path.exists(absdst):
             os.unlink(absdst)
 
     def transplantfilter(self, repo, source, root):
         def matchfn(node):
             if self.applied(repo, node, root):
                 return False
             if source.changelog.parents(node)[1] != revlog.nullid:
                 return False
             extra = source.changelog.read(node)[5]
             cnode = extra.get('transplant_source')
             if cnode and self.applied(repo, cnode, root):
                 return False
             return True
 
         return matchfn
 
 def hasnode(repo, node):
     try:
         return repo.changelog.rev(node) is not None
     except error.RevlogError:
         return False
 
 def browserevs(ui, repo, nodes, opts):
     '''interactively transplant changesets'''
     def browsehelp(ui):
         ui.write(_('y: transplant this changeset\n'
                    'n: skip this changeset\n'
                    'm: merge at this changeset\n'
                    'p: show patch\n'
                    'c: commit selected changesets\n'
                    'q: cancel transplant\n'
                    '?: show this help\n'))
 
     displayer = cmdutil.show_changeset(ui, repo, opts)
     transplants = []
     merges = []
     for node in nodes:
         displayer.show(repo[node])
         action = None
         while not action:
             action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
             if action == '?':
                 browsehelp(ui)
                 action = None
             elif action == 'p':
                 parent = repo.changelog.parents(node)[0]
                 for chunk in patch.diff(repo, parent, node):
                     ui.write(chunk)
                 action = None
             elif action not in ('y', 'n', 'm', 'c', 'q'):
                 ui.write(_('no such option\n'))
                 action = None
         if action == 'y':
             transplants.append(node)
         elif action == 'm':
             merges.append(node)
         elif action == 'c':
             break
         elif action == 'q':
             transplants = ()
             merges = ()
             break
     displayer.close()
     return (transplants, merges)
 
 @command('transplant',
     [('s', 'source', '', _('pull patches from REPO'), _('REPO')),
     ('b', 'branch', [],
      _('pull patches from branch BRANCH'), _('BRANCH')),
     ('a', 'all', None, _('pull all changesets up to BRANCH')),
     ('p', 'prune', [], _('skip over REV'), _('REV')),
     ('m', 'merge', [], _('merge at REV'), _('REV')),
     ('', 'log', None, _('append transplant info to log message')),
     ('c', 'continue', None, _('continue last transplant session '
                               'after repair')),
     ('', 'filter', '',
      _('filter changesets through command'), _('CMD'))],
     _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
       '[-m REV] [REV]...'))
 def transplant(ui, repo, *revs, **opts):
     '''transplant changesets from another branch
 
     Selected changesets will be applied on top of the current working
     directory with the log of the original changeset. The changesets
     are copied and will thus appear twice in the history. Use the
     rebase extension instead if you want to move a whole branch of
     unpublished changesets.
 
     If --log is specified, log messages will have a comment appended
     of the form::
 
       (transplanted from CHANGESETHASH)
 
     You can rewrite the changelog message with the --filter option.
     Its argument will be invoked with the current changelog message as
     $1 and the patch as $2.
 
     If --source/-s is specified, selects changesets from the named
     repository. If --branch/-b is specified, selects changesets from
     the branch holding the named revision, up to that revision. If
     --all/-a is specified, all changesets on the branch will be
     transplanted, otherwise you will be prompted to select the
     changesets you want.
 
     :hg:`transplant --branch REVISION --all` will transplant the
     selected branch (up to the named revision) onto your current
     working directory.
 
     You can optionally mark selected transplanted changesets as merge
     changesets. You will not be prompted to transplant any ancestors
     of a merged transplant, and you can merge descendants of them
     normally instead of transplanting them.
 
     If no merges or revisions are provided, :hg:`transplant` will
     start an interactive changeset browser.
 
     If a changeset application fails, you can fix the merge by hand
     and then resume where you left off by calling :hg:`transplant
     --continue/-c`.
     '''
     def incwalk(repo, csets, match=util.always):
         for node in csets:
             if match(node):
                 yield node
 
     def transplantwalk(repo, root, branches, match=util.always):
         if not branches:
             branches = repo.heads()
         ancestors = []
         for branch in branches:
             ancestors.append(repo.changelog.ancestor(root, branch))
         for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
             if match(node):
                 yield node
 
     def checkopts(opts, revs):
         if opts.get('continue'):
             if opts.get('branch') or opts.get('all') or opts.get('merge'):
                 raise util.Abort(_('--continue is incompatible with '
                                    'branch, all or merge'))
             return
         if not (opts.get('source') or revs or
                 opts.get('merge') or opts.get('branch')):
             raise util.Abort(_('no source URL, branch tag or revision '
                                'list provided'))
         if opts.get('all'):
             if not opts.get('branch'):
                 raise util.Abort(_('--all requires a branch revision'))
             if revs:
                 raise util.Abort(_('--all is incompatible with a '
                                    'revision list'))
 
     checkopts(opts, revs)
 
     if not opts.get('log'):
         opts['log'] = ui.config('transplant', 'log')
     if not opts.get('filter'):
         opts['filter'] = ui.config('transplant', 'filter')
 
     tp = transplanter(ui, repo)
 
     p1, p2 = repo.dirstate.parents()
     if len(repo) > 0 and p1 == revlog.nullid:
         raise util.Abort(_('no revision checked out'))
     if not opts.get('continue'):
         if p2 != revlog.nullid:
             raise util.Abort(_('outstanding uncommitted merges'))
         m, a, r, d = repo.status()[:4]
         if m or a or r or d:
             raise util.Abort(_('outstanding local changes'))
 
     sourcerepo = opts.get('source')
     if sourcerepo:
         source = hg.repository(ui, ui.expandpath(sourcerepo))
         branches = map(source.lookup, opts.get('branch', ()))
         source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, source,
                                     onlyheads=branches, force=True)
     else:
         source = repo
         branches = map(source.lookup, opts.get('branch', ()))
         cleanupfn = None
 
     try:
         if opts.get('continue'):
             tp.resume(repo, source, opts)
             return
 
         tf = tp.transplantfilter(repo, source, p1)
         if opts.get('prune'):
             prune = [source.lookup(r)
                      for r in scmutil.revrange(source, opts.get('prune'))]
             matchfn = lambda x: tf(x) and x not in prune
         else:
             matchfn = tf
         merges = map(source.lookup, opts.get('merge', ()))
         revmap = {}
         if revs:
             for r in scmutil.revrange(source, revs):
                 revmap[int(r)] = source.lookup(r)
         elif opts.get('all') or not merges:
             if source != repo:
                 alltransplants = incwalk(source, csets, match=matchfn)
             else:
                 alltransplants = transplantwalk(source, p1, branches,
                                                 match=matchfn)
             if opts.get('all'):
                 revs = alltransplants
             else:
                 revs, newmerges = browserevs(ui, source, alltransplants, opts)
                 merges.extend(newmerges)
         for r in revs:
             revmap[source.changelog.rev(r)] = r
         for r in merges:
             revmap[source.changelog.rev(r)] = r
 
         tp.apply(repo, source, revmap, merges, opts)
     finally:
         if cleanupfn:
             cleanupfn()
 
 def revsettransplanted(repo, subset, x):
     """``transplanted([set])``
     Transplanted changesets in set, or all transplanted changesets.
     """
     if x:
         s = revset.getset(repo, subset, x)
     else:
         s = subset
     return [r for r in s if repo[r].extra().get('transplant_source')]
 
 def kwtransplanted(repo, ctx, **args):
     """:transplanted: String. The node identifier of the transplanted
     changeset if any."""
     n = ctx.extra().get('transplant_source')
     return n and revlog.hex(n) or ''
 
 def extsetup(ui):
     revset.symbols['transplanted'] = revsettransplanted
     templatekw.keywords['transplanted'] = kwtransplanted
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsettransplanted, kwtransplanted]
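
The transplant hunk above leaves revlog.bin and revlog.hex calls untouched, since those names stay re-exported; only short now has to come from mercurial.node. For orientation, the node helpers are thin wrappers over binascii. The sketch below reconstructs mercurial/node.py of this era from memory, so treat the exact bodies as an assumption rather than part of this diff:

    # assumed sketch of mercurial/node.py, not part of this changeset
    import binascii

    nullrev = -1
    nullid = "\0" * 20          # the null node: twenty zero bytes

    bin = binascii.unhexlify    # 40 hex digits -> 20-byte binary node
    hex = binascii.hexlify      # 20-byte binary node -> 40 hex digits

    def short(node):
        return hex(node[:6])    # 12-digit abbreviation used in UI output

The revlog hunk that follows trims the re-export line itself.
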
@@ -1,1278 +1,1278 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util, dagutil
17 import ancestor, mdiff, parsers, error, util, dagutil
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGGENERALDELTA = (1 << 17)
30 REVLOGGENERALDELTA = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_KNOWN_FLAGS = 0
37 REVIDX_KNOWN_FLAGS = 0
38
38
39 # max size of revlog with inline data
39 # max size of revlog with inline data
40 _maxinline = 131072
40 _maxinline = 131072
41 _chunksize = 1048576
41 _chunksize = 1048576
42
42
43 RevlogError = error.RevlogError
43 RevlogError = error.RevlogError
44 LookupError = error.LookupError
44 LookupError = error.LookupError
45
45
46 def getoffset(q):
46 def getoffset(q):
47 return int(q >> 16)
47 return int(q >> 16)
48
48
49 def gettype(q):
49 def gettype(q):
50 return int(q & 0xFFFF)
50 return int(q & 0xFFFF)
51
51
52 def offset_type(offset, type):
52 def offset_type(offset, type):
53 return long(long(offset) << 16 | type)
53 return long(long(offset) << 16 | type)
54
54
55 nullhash = _sha(nullid)
55 nullhash = _sha(nullid)
56
56
57 def hash(text, p1, p2):
57 def hash(text, p1, p2):
58 """generate a hash from the given text and its parent hashes
58 """generate a hash from the given text and its parent hashes
59
59
60 This hash combines both the current file contents and its history
60 This hash combines both the current file contents and its history
61 in a manner that makes it easy to distinguish nodes with the same
61 in a manner that makes it easy to distinguish nodes with the same
62 content in the revision graph.
62 content in the revision graph.
63 """
63 """
64 # As of now, if one of the parent node is null, p2 is null
64 # As of now, if one of the parent node is null, p2 is null
65 if p2 == nullid:
65 if p2 == nullid:
66 # deep copy of a hash is faster than creating one
66 # deep copy of a hash is faster than creating one
67 s = nullhash.copy()
67 s = nullhash.copy()
68 s.update(p1)
68 s.update(p1)
69 else:
69 else:
70 # none of the parent nodes are nullid
70 # none of the parent nodes are nullid
71 l = [p1, p2]
71 l = [p1, p2]
72 l.sort()
72 l.sort()
73 s = _sha(l[0])
73 s = _sha(l[0])
74 s.update(l[1])
74 s.update(l[1])
75 s.update(text)
75 s.update(text)
76 return s.digest()
76 return s.digest()
77
77
78 def compress(text):
78 def compress(text):
79 """ generate a possibly-compressed representation of text """
79 """ generate a possibly-compressed representation of text """
80 if not text:
80 if not text:
81 return ("", text)
81 return ("", text)
82 l = len(text)
82 l = len(text)
83 bin = None
83 bin = None
84 if l < 44:
84 if l < 44:
85 pass
85 pass
86 elif l > 1000000:
86 elif l > 1000000:
87 # zlib makes an internal copy, thus doubling memory usage for
87 # zlib makes an internal copy, thus doubling memory usage for
88 # large files, so lets do this in pieces
88 # large files, so lets do this in pieces
89 z = zlib.compressobj()
89 z = zlib.compressobj()
90 p = []
90 p = []
91 pos = 0
91 pos = 0
92 while pos < l:
92 while pos < l:
93 pos2 = pos + 2**20
93 pos2 = pos + 2**20
94 p.append(z.compress(text[pos:pos2]))
94 p.append(z.compress(text[pos:pos2]))
95 pos = pos2
95 pos = pos2
96 p.append(z.flush())
96 p.append(z.flush())
97 if sum(map(len, p)) < l:
97 if sum(map(len, p)) < l:
98 bin = "".join(p)
98 bin = "".join(p)
99 else:
99 else:
100 bin = _compress(text)
100 bin = _compress(text)
101 if bin is None or len(bin) > l:
101 if bin is None or len(bin) > l:
102 if text[0] == '\0':
102 if text[0] == '\0':
103 return ("", text)
103 return ("", text)
104 return ('u', text)
104 return ('u', text)
105 return ("", bin)
105 return ("", bin)
106
106
107 def decompress(bin):
107 def decompress(bin):
108 """ decompress the given input """
108 """ decompress the given input """
109 if not bin:
109 if not bin:
110 return bin
110 return bin
111 t = bin[0]
111 t = bin[0]
112 if t == '\0':
112 if t == '\0':
113 return bin
113 return bin
114 if t == 'x':
114 if t == 'x':
115 return _decompress(bin)
115 return _decompress(bin)
116 if t == 'u':
116 if t == 'u':
117 return bin[1:]
117 return bin[1:]
118 raise RevlogError(_("unknown compression type %r") % t)
118 raise RevlogError(_("unknown compression type %r") % t)
119
119
120 indexformatv0 = ">4l20s20s20s"
120 indexformatv0 = ">4l20s20s20s"
121 v0shaoffset = 56
121 v0shaoffset = 56
122
122
123 class revlogoldio(object):
123 class revlogoldio(object):
124 def __init__(self):
124 def __init__(self):
125 self.size = struct.calcsize(indexformatv0)
125 self.size = struct.calcsize(indexformatv0)
126
126
127 def parseindex(self, data, inline):
127 def parseindex(self, data, inline):
128 s = self.size
128 s = self.size
129 index = []
129 index = []
130 nodemap = {nullid: nullrev}
130 nodemap = {nullid: nullrev}
131 n = off = 0
131 n = off = 0
132 l = len(data)
132 l = len(data)
133 while off + s <= l:
133 while off + s <= l:
134 cur = data[off:off + s]
134 cur = data[off:off + s]
135 off += s
135 off += s
136 e = _unpack(indexformatv0, cur)
136 e = _unpack(indexformatv0, cur)
137 # transform to revlogv1 format
137 # transform to revlogv1 format
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 index.append(e2)
140 index.append(e2)
141 nodemap[e[6]] = n
141 nodemap[e[6]] = n
142 n += 1
142 n += 1
143
143
144 # add the magic null revision at -1
144 # add the magic null revision at -1
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146
146
147 return index, nodemap, None
147 return index, nodemap, None
148
148
149 def packentry(self, entry, node, version, rev):
149 def packentry(self, entry, node, version, rev):
150 if gettype(entry[0]):
150 if gettype(entry[0]):
151 raise RevlogError(_("index entry flags need RevlogNG"))
151 raise RevlogError(_("index entry flags need RevlogNG"))
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 node(entry[5]), node(entry[6]), entry[7])
153 node(entry[5]), node(entry[6]), entry[7])
154 return _pack(indexformatv0, *e2)
154 return _pack(indexformatv0, *e2)
155
155
156 # index ng:
156 # index ng:
157 # 6 bytes: offset
157 # 6 bytes: offset
158 # 2 bytes: flags
158 # 2 bytes: flags
159 # 4 bytes: compressed length
159 # 4 bytes: compressed length
160 # 4 bytes: uncompressed length
160 # 4 bytes: uncompressed length
161 # 4 bytes: base rev
161 # 4 bytes: base rev
162 # 4 bytes: link rev
162 # 4 bytes: link rev
163 # 4 bytes: parent 1 rev
163 # 4 bytes: parent 1 rev
164 # 4 bytes: parent 2 rev
164 # 4 bytes: parent 2 rev
165 # 32 bytes: nodeid
165 # 32 bytes: nodeid
166 indexformatng = ">Qiiiiii20s12x"
166 indexformatng = ">Qiiiiii20s12x"
167 ngshaoffset = 32
167 ngshaoffset = 32
168 versionformat = ">I"
168 versionformat = ">I"
169
169
170 class revlogio(object):
170 class revlogio(object):
171 def __init__(self):
171 def __init__(self):
172 self.size = struct.calcsize(indexformatng)
172 self.size = struct.calcsize(indexformatng)
173
173
174 def parseindex(self, data, inline):
174 def parseindex(self, data, inline):
175 # call the C implementation to parse the index data
175 # call the C implementation to parse the index data
176 index, cache = parsers.parse_index2(data, inline)
176 index, cache = parsers.parse_index2(data, inline)
177 return index, None, cache
177 return index, None, cache
178
178
179 def packentry(self, entry, node, version, rev):
179 def packentry(self, entry, node, version, rev):
180 p = _pack(indexformatng, *entry)
180 p = _pack(indexformatng, *entry)
181 if rev == 0:
181 if rev == 0:
182 p = _pack(versionformat, version) + p[4:]
182 p = _pack(versionformat, version) + p[4:]
183 return p
183 return p
184
184
185 class revlog(object):
185 class revlog(object):
186 """
186 """
187 the underlying revision storage object
187 the underlying revision storage object
188
188
189 A revlog consists of two parts, an index and the revision data.
189 A revlog consists of two parts, an index and the revision data.
190
190
191 The index is a file with a fixed record size containing
191 The index is a file with a fixed record size containing
192 information on each revision, including its nodeid (hash), the
192 information on each revision, including its nodeid (hash), the
193 nodeids of its parents, the position and offset of its data within
193 nodeids of its parents, the position and offset of its data within
194 the data file, and the revision it's based on. Finally, each entry
194 the data file, and the revision it's based on. Finally, each entry
195 contains a linkrev entry that can serve as a pointer to external
195 contains a linkrev entry that can serve as a pointer to external
196 data.
196 data.
197
197
198 The revision data itself is a linear collection of data chunks.
198 The revision data itself is a linear collection of data chunks.
199 Each chunk represents a revision and is usually represented as a
199 Each chunk represents a revision and is usually represented as a
200 delta against the previous chunk. To bound lookup time, runs of
200 delta against the previous chunk. To bound lookup time, runs of
201 deltas are limited to about 2 times the length of the original
201 deltas are limited to about 2 times the length of the original
202 version data. This makes retrieval of a version proportional to
202 version data. This makes retrieval of a version proportional to
203 its size, or O(1) relative to the number of revisions.
203 its size, or O(1) relative to the number of revisions.
204
204
205 Both pieces of the revlog are written to in an append-only
205 Both pieces of the revlog are written to in an append-only
206 fashion, which means we never need to rewrite a file to insert or
206 fashion, which means we never need to rewrite a file to insert or
207 remove data, and can use some simple techniques to avoid the need
207 remove data, and can use some simple techniques to avoid the need
208 for locking while reading.
208 for locking while reading.
209 """
209 """
210 def __init__(self, opener, indexfile):
210 def __init__(self, opener, indexfile):
211 """
211 """
212 create a revlog object
212 create a revlog object
213
213
214 opener is a function that abstracts the file opening operation
214 opener is a function that abstracts the file opening operation
215 and can be used to implement COW semantics or the like.
215 and can be used to implement COW semantics or the like.
216 """
216 """
217 self.indexfile = indexfile
217 self.indexfile = indexfile
218 self.datafile = indexfile[:-2] + ".d"
218 self.datafile = indexfile[:-2] + ".d"
219 self.opener = opener
219 self.opener = opener
220 self._cache = None
220 self._cache = None
221 self._basecache = (0, 0)
221 self._basecache = (0, 0)
222 self._chunkcache = (0, '')
222 self._chunkcache = (0, '')
223 self.index = []
223 self.index = []
224 self._pcache = {}
224 self._pcache = {}
225 self._nodecache = {nullid: nullrev}
225 self._nodecache = {nullid: nullrev}
226 self._nodepos = None
226 self._nodepos = None
227
227
228 v = REVLOG_DEFAULT_VERSION
228 v = REVLOG_DEFAULT_VERSION
229 if hasattr(opener, 'options'):
229 if hasattr(opener, 'options'):
230 if 'revlogv1' in opener.options:
230 if 'revlogv1' in opener.options:
231 if 'generaldelta' in opener.options:
231 if 'generaldelta' in opener.options:
232 v |= REVLOGGENERALDELTA
232 v |= REVLOGGENERALDELTA
233 else:
233 else:
234 v = 0
234 v = 0
235
235
236 i = ''
236 i = ''
237 self._initempty = True
237 self._initempty = True
238 try:
238 try:
239 f = self.opener(self.indexfile)
239 f = self.opener(self.indexfile)
240 i = f.read()
240 i = f.read()
241 f.close()
241 f.close()
242 if len(i) > 0:
242 if len(i) > 0:
243 v = struct.unpack(versionformat, i[:4])[0]
243 v = struct.unpack(versionformat, i[:4])[0]
244 self._initempty = False
244 self._initempty = False
245 except IOError, inst:
245 except IOError, inst:
246 if inst.errno != errno.ENOENT:
246 if inst.errno != errno.ENOENT:
247 raise
247 raise
248
248
249 self.version = v
249 self.version = v
250 self._inline = v & REVLOGNGINLINEDATA
250 self._inline = v & REVLOGNGINLINEDATA
251 self._generaldelta = v & REVLOGGENERALDELTA
251 self._generaldelta = v & REVLOGGENERALDELTA
252 flags = v & ~0xFFFF
252 flags = v & ~0xFFFF
253 fmt = v & 0xFFFF
253 fmt = v & 0xFFFF
254 if fmt == REVLOGV0 and flags:
254 if fmt == REVLOGV0 and flags:
255 raise RevlogError(_("index %s unknown flags %#04x for format v0")
255 raise RevlogError(_("index %s unknown flags %#04x for format v0")
256 % (self.indexfile, flags >> 16))
256 % (self.indexfile, flags >> 16))
257 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
257 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
258 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
258 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
259 % (self.indexfile, flags >> 16))
259 % (self.indexfile, flags >> 16))
260 elif fmt > REVLOGNG:
260 elif fmt > REVLOGNG:
261 raise RevlogError(_("index %s unknown format %d")
261 raise RevlogError(_("index %s unknown format %d")
262 % (self.indexfile, fmt))
262 % (self.indexfile, fmt))
263
263
264 self._io = revlogio()
264 self._io = revlogio()
265 if self.version == REVLOGV0:
265 if self.version == REVLOGV0:
266 self._io = revlogoldio()
266 self._io = revlogoldio()
267 try:
267 try:
268 d = self._io.parseindex(i, self._inline)
268 d = self._io.parseindex(i, self._inline)
269 except (ValueError, IndexError):
269 except (ValueError, IndexError):
270 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
270 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
271 self.index, nodemap, self._chunkcache = d
271 self.index, nodemap, self._chunkcache = d
272 if nodemap is not None:
272 if nodemap is not None:
273 self.nodemap = self._nodecache = nodemap
273 self.nodemap = self._nodecache = nodemap
274 if not self._chunkcache:
274 if not self._chunkcache:
275 self._chunkclear()
275 self._chunkclear()
276
276
    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def rev(self, node):
        try:
            return self._nodecache[node]
        except KeyError:
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

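The accessors above fix the layout of each index entry: an 8-tuple whose first field packs the data offset and per-revision flags. A purely illustrative entry, with field meanings inferred from the accessors rather than dumped from a real revlog:

# Illustrative only: the 8-tuple layout implied by the accessors above.
entry = ((4096 << 16) | 0,  # [0] offset_type: data offset << 16 | flags
         120,               # [1] length: compressed chunk size on disk
         300,               # [2] rawsize: uncompressed text size
         2,                 # [3] base: start of this rev's delta chain
         7,                 # [4] linkrev: changelog rev that introduced it
         2, -1,             # [5], [6] parent revisions (-1 == nullrev)
         b'\x12' * 20)      # [7] binary node id

start = int(entry[0] >> 16)   # what start() returns -> 4096
flags = entry[0] & 0xFFFF     # what flags() returns -> 0
print(start, flags, entry[1], entry[2])
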
    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

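For illustration, the same breadth-first walk as ancestors() over a toy DAG, using a plain dict of rev -> (p1, p2) with -1 standing in for nullrev (this is not the revlog API):

def toy_ancestors(parentrevs, *revs):
    # breadth-first: parents of the given revs, then their parents, ...
    visit, seen = list(revs), set([-1])
    while visit:
        for parent in parentrevs[visit.pop(0)]:
            if parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent

dag = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
print(list(toy_ancestors(dag, 3)))  # [1, 2, 0]: parents first, then theirs
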
    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in xrange(first + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

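descendants() leans on the revlog invariant that a revision's parents always have smaller numbers than the revision itself, so a single forward scan in numeric order finds every descendant. A toy equivalent on the same dict-based DAG as above:

def toy_descendants(parentrevs, *revs):
    # one forward pass suffices because parents precede children
    seen = set(revs)
    for i in range(min(revs) + 1, len(parentrevs)):
        if any(p != -1 and p in seen for p in parentrevs[i]):
            seen.add(i)
            yield i

dag = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
print(list(toy_descendants(dag, 1)))  # [3]
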
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        _common, missing = self.findcommonmissing(common, heads)
        return missing

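Conceptually findmissing() is set subtraction between two inclusive ancestor closures. A minimal sketch on toy data (again a rev -> (p1, p2) dict, not the revlog API):

def toy_missing(parentrevs, common, heads):
    def closure(revs):
        # inclusive ancestor closure of revs
        seen, visit = set(revs), list(revs)
        while visit:
            for p in parentrevs[visit.pop()]:
                if p != -1 and p not in seen:
                    seen.add(p)
                    visit.append(p)
        return seen
    return sorted(closure(heads) - closure(common))

dag = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
print(toy_missing(dag, common=[2], heads=[3]))  # [3]
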
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendant
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

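In revset terms the 'nodes' result is essentially roots::heads: descendants of the roots intersected with ancestors of the heads, kept in numeric (hence topological) order. A compact toy equivalent of that intersection, which skips the fake-head and fake-root filtering the real method performs:

def toy_between(parentrevs, roots, heads):
    def ancestors(revs):
        # inclusive ancestor closure
        seen, visit = set(revs), list(revs)
        while visit:
            for p in parentrevs[visit.pop()]:
                if p != -1 and p not in seen:
                    seen.add(p)
                    visit.append(p)
        return seen
    desc = set(roots)
    for r in sorted(parentrevs):  # forward scan marks descendants
        if any(p in desc for p in parentrevs[r]):
            desc.add(r)
    return sorted(desc & ancestors(heads))

dag = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
print(toy_between(dag, roots=[1], heads=[3]))  # [1, 3]
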
    def headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0
        return [r for r in xrange(count) if ishead[r]]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

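headrevs() uses a neat trick: any revision named as a parent cannot be a head, and the scratch array has one extra slot so nullrev (-1) parents harmlessly clear the last element. On toy data:

def toy_headrevs(parentrevs):
    count = len(parentrevs)
    ishead = [1] * (count + 1)   # extra slot absorbs index -1
    for r in range(count):
        p1, p2 = parentrevs[r]
        ishead[p1] = ishead[p2] = 0  # a parent is never a head
    return [r for r in range(count) if ishead[r]]

print(toy_headrevs({0: (-1, -1), 1: (0, -1), 2: (0, -1)}))  # [1, 2]
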
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants(start):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            return self.node(start)

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

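The partial-match code first narrows candidates on an even number of hex digits, so the prefix converts cleanly to binary, then re-checks against the full hex form to handle odd-length prefixes. A self-contained sketch with two toy nodes (standard library only, not the revlog index):

from binascii import hexlify, unhexlify

nodes = [unhexlify('ab' * 20), unhexlify('ac' * 20)]  # toy 20-byte nodes

def partialmatch(nodes, hexid):
    # even-length binary prefix first, then the full hex comparison
    prefix = unhexlify(hexid[:(len(hexid) // 2) * 2])
    cand = [n for n in nodes if n.startswith(prefix)]
    cand = [n for n in cand if hexlify(n).decode().startswith(hexid)]
    if len(cand) > 1:
        raise LookupError('ambiguous identifier')
    return cand[0] if cand else None

print(hexlify(partialmatch(nodes, 'aba')).decode()[:6])  # ababab
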
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

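hash() here computes the usual Mercurial nodeid, SHA-1 over the two parent nodes in sorted order followed by the text, which is why cmp() can compare without decompressing the stored revision. A hedged, self-contained rendition of that formula:

import hashlib

def toy_hash(text, p1, p2):
    # assumed formula: sha1(min(p1, p2) + max(p1, p2) + text)
    s = hashlib.sha1()
    for p in sorted([p1, p2]):
        s.update(p)
    s.update(text)
    return s.digest()

null = b'\0' * 20
node = toy_hash(b'hello\n', null, null)
print(len(node), node == toy_hash(b'hello\n', null, null))  # 20 True
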
    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

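The inline adjustment in _chunkraw() reflects index entries and data chunks being interleaved in one file: rev r's data follows index entry r, so r + 1 whole entries precede it. A sketch, assuming the 64-byte revlogng entry size (an assumption here, not read from the source above):

def physical_offset(logical_start, rev, inline, entrysize=64):
    # entrysize=64 assumed for revlogng; inline files interleave
    # one index entry before each revision's data chunk
    if inline:
        return logical_start + (rev + 1) * entrysize
    return logical_start

print(physical_offset(0, 0, True))    # 64: first chunk follows entry 0
print(physical_offset(120, 2, True))  # 312 = 120 + 3 * 64
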
    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

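The delta-chain walk above is easier to see on toy data: walk back to the base snapshot, then replay the deltas forward. Plain callables stand in for binary mdiff patches so the sketch is self-contained:

snapshots = {0: 'line1\n'}                       # rev 0 is a full text
deltas = {1: lambda t: t + 'line2\n',            # rev 1 patches rev 0
          2: lambda t: t.replace('line1', 'LINE1')}  # rev 2 patches rev 1
base_of = {0: 0, 1: 0, 2: 0}  # chain base of each rev

def toy_revision(rev):
    chain = []
    while rev != base_of[rev]:
        chain.append(rev)
        rev -= 1           # non-generaldelta: the previous rev is the base
    text = snapshots[rev]
    for r in reversed(chain):   # replay deltas oldest-first
        text = deltas[r](text)
    return text

print(toy_revision(2) == 'LINE1\nline2\n')  # True
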
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base, chainbase

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        flags = 0
        d = None
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

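The 'dist > textlen * 2' test above is the snapshot heuristic in one line: store a delta only while reading the whole chain stays cheaper than about twice the uncompressed text, otherwise start a new full version. Isolated for clarity:

def store_as_delta(chain_dist, textlen):
    # chain_dist: bytes to read from the chain base through this delta
    # textlen: uncompressed size of the revision being added
    return chain_dist <= textlen * 2

print(store_as_delta(chain_dist=150, textlen=100))  # True: keep the chain
print(store_as_delta(chain_dist=250, textlen=100))  # False: full snapshot
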
    def group(self, nodelist, bundler, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.
        """

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (self._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(self)
            revs = set(self.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([self.rev(n) for n in nodelist])

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield bundler.close()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while 1:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

@@ -1,15 +1,14 @@
   $ "$TESTDIR/hghave" pyflakes || exit 80
   $ cd $(dirname $TESTDIR)
   $ pyflakes mercurial hgext 2>&1 | $TESTDIR/filterpyflakes.py
   mercurial/hgweb/server.py:*: 'activeCount' imported but unused (glob)
   mercurial/commands.py:*: 'base85' imported but unused (glob)
   mercurial/commands.py:*: 'bdiff' imported but unused (glob)
   mercurial/commands.py:*: 'mpatch' imported but unused (glob)
   mercurial/commands.py:*: 'osutil' imported but unused (glob)
-  mercurial/revlog.py:*: 'short' imported but unused (glob)
   hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob)
   mercurial/util.py:*: 'from posix import *' used; unable to detect undefined names (glob)
   mercurial/windows.py:*: 'from win32 import *' used; unable to detect undefined names (glob)
   mercurial/util.py:*: 'from windows import *' used; unable to detect undefined names (glob)
 
 