Only look up tags and branches as a last resort
Matt Mackall
r3453:dba3cade default
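
This changeset reorders localrepository.lookup(): an exact changelog match
(an integer revision or a full 40-character hex node) is now tried first,
tag and branch names second, and an unambiguous short hex prefix only as
the final fallback; previously tags and branches were consulted before the
changelog. A minimal sketch of the new resolution order, assuming (as the
hunk below implies) that changelog._match() resolves exact revision/node
keys and changelog._partialmatch() resolves unambiguous hex prefixes:

    # hypothetical standalone rendering of the new lookup() order;
    # 'rp' stands in for a localrepository, with repo and _ as imported
    # at the top of localrepo.py
    def resolve(rp, key):
        n = rp.changelog._match(key)           # 1. exact: rev number or full hex node
        if n:
            return n
        if key in rp.tags():                   # 2. tag names
            return rp.tags()[key]
        if key in rp.branchtags():             # 3. named-branch heads
            return rp.branchtags()[key]
        n = rp.changelog._partialmatch(key)    # 4. last resort: short hex prefix
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

One observable effect: a tag that happens to spell a revision number or a
full node hash can no longer shadow that revision.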
@@ -1,1817 +1,1820 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import gettext as _
from demandload import *
import repo
demandload(globals(), "appendfile changegroup")
demandload(globals(), "changelog dirstate filelog manifest context")
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
demandload(globals(), "os revlog time util")

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just use the version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
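
    # Illustrative [hooks] wiring for the two hook flavors dispatched
    # above (section name and calling conventions per the code; the
    # concrete commands and module name are made up):
    #
    #   [hooks]
    #   commit = hg tip                     # shell hook, run via runhook()
    #   commit.check = python:mymod.check   # in-process hook, via callhook()
    #
    # Both entries fire for the "commit" hook, since only the part before
    # the first "." is matched against the hook name. A python hook is
    # called as fn(ui=..., repo=..., hooktype='commit', **args) and fails
    # if it returns a true value; a shell hook fails on nonzero exit.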

    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = line.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: cannot parse entry\n") % context)
                    return
                node, key = s
                key = key.strip()
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                 (context, node))
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                 (context, key))
                    return
                self.tagscache[key] = bin_n

            # read the tags file from each head, ending with the tip,
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            heads = self.heads()
            heads.reverse()
            fl = self.file(".hgtags")
            for node in heads:
                change = self.changelog.read(node)
                rev = self.changelog.rev(node)
                fn, ff = self.manifest.find(change[0], '.hgtags')
                if fn is None: continue
                count = 0
                for l in fl.read(fn).splitlines():
                    count += 1
                    parsetag(l, _(".hgtags (rev %d:%s), line %d") %
                             (rev, short(node), count))
            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    count += 1
                    parsetag(l, _("localtags, line %d") % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def branchtags(self):
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        try:
            f = self.opener("branches.cache")
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    self.branchcache[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, -1
            f.close()
        except IOError:
            last, lrev = nullid, -1

        tip = self.changelog.count() - 1
        if lrev != tip:
            for r in xrange(lrev + 1, tip + 1):
                c = self.changectx(r)
                b = c.branch()
                if b:
                    self.branchcache[b] = c.node()
            self._writebranchcache()

        return self.branchcache

    def _writebranchcache(self):
        try:
            f = self.opener("branches.cache", "w")
            t = self.changelog.tip()
            f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
            for label, node in self.branchcache.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass

    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
+        n = self.changelog._match(key)
+        if n:
+            return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
-        try:
-            return self.changelog.lookup(key)
-        except:
-            raise repo.RepoError(_("unknown revision '%s'") % key)
+        n = self.changelog._partialmatch(key)
+        if n:
+            return n
+        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
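
    # Illustrative [encode]/[decode] filters consumed by wread() and
    # wwrite() above (the patterns and commands are made up): [encode]
    # runs when a working-directory file is read in, [decode] when data
    # is written back out; the matched shell command filters the data
    # via util.filter().
    #
    #   [encode]
    #   *.txt = tr -d '\r'         # strip carriage returns going in
    #   [decode]
    #   *.txt = sed -e 's/$/\r/'   # restore them on the way out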

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        branchname = self.workingctx().branch()
        oldname = c1[5].get("branch", "")

        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        extra = {}
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
911 # a has tag 2.6.12
914 # a has tag 2.6.12
912 # d has tag 2.6.13
915 # d has tag 2.6.13
913 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
916 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
914 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
917 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
915 # from the list.
918 # from the list.
916 #
919 #
917 # It is possible that more than one head will have the same branch tag.
920 # It is possible that more than one head will have the same branch tag.
918 # callers need to check the result for multiple heads under the same
921 # callers need to check the result for multiple heads under the same
919 # branch tag if that is a problem for them (ie checkout of a specific
922 # branch tag if that is a problem for them (ie checkout of a specific
920 # branch).
923 # branch).
921 #
924 #
922 # passing in a specific branch will limit the depth of the search
925 # passing in a specific branch will limit the depth of the search
923 # through the parents. It won't limit the branches returned in the
926 # through the parents. It won't limit the branches returned in the
924 # result though.
927 # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
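
    # Illustrative sketch (not part of the original module): with the graph
    # from the comment above, where a is tagged 2.6.12 and d is tagged
    # 2.6.13, a hypothetical call
    #
    #   repo.branchlookup()
    #
    # would map head e to ['2.6.13'] only, because the 2.6.12 tag is
    # visible from the 2.6.13 tag and is therefore eliminated.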

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
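
    # Worked example (an aside, not in the original source): on a linear
    # path of length 20 from top to bottom, the loop above records the
    # nodes at distances 1, 2, 4, 8 and 16 from top, since i == f only
    # when f has just doubled.  findincoming below uses these exponentially
    # spaced samples to narrow a branch range in O(log n) round trips.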

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
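
    # Illustrative sketch (assumed usage, mirroring pull and prepush below):
    #
    #   base = {}
    #   roots = repo.findincoming(remote, base)
    #   # 'roots' bounds the changegroup the remote must send; 'base' now
    #   # holds the common nodes discovered along the way.
    #
    # Note that discovery only needs remote.heads(), remote.branches() and
    # remote.between() from the peer, which keeps the wire protocol small.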

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
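
    # Illustrative note (an aside): callers that pass 'heads' must unpack
    # a pair, e.g.
    #
    #   update, updated_heads = repo.findoutgoing(remote, base, remote_heads)
    #
    # exactly as prepush does below, while callers that omit 'heads' get
    # just the list of roots.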

    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
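
    # Illustrative note (an aside): prepush returns either (None, 1) on
    # refusal or (changegroup, remote_heads) on success, so both transports
    # below share the same pattern:
    #
    #   ret = self.prepush(remote, force, revs)
    #   if ret[0] is not None:
    #       cg, remote_heads = ret
    #       ... send cg ...
    #   return ret[1]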

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
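
        # (Illustrative aside, not in the original: for example,
        #  msng_mnfst_lst.sort(cmp_by_rev_func(mnfst)) in gengroup below
        #  yields the manifest nodes oldest-first, an order that is both
        #  efficient to read and topologically sorted.)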

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
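
    # Illustrative note (an aside): gengroup above emits the changegroup
    # stream in a fixed order -- one changelog group, one manifest group,
    # then for each changed file a filename chunk followed by that file's
    # revlog group, terminated by changegroup.closechunk().  addchangegroup
    # below consumes the stream in exactly this order.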

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
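
    # Illustrative note (an aside): the "+ 1" in the return value lets
    # callers distinguish outcomes, e.g. after
    #
    #   modheads = repo.pull(remote)
    #
    # 0 means nothing was added, 1 means changesets arrived without
    # changing the number of heads, and > 1 signals modified or new heads.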
1758
1761
1759
1762
1760 def stream_in(self, remote):
1763 def stream_in(self, remote):
1761 fp = remote.stream_out()
1764 fp = remote.stream_out()
1762 resp = int(fp.readline())
1765 resp = int(fp.readline())
1763 if resp != 0:
1766 if resp != 0:
1764 raise util.Abort(_('operation forbidden by server'))
1767 raise util.Abort(_('operation forbidden by server'))
1765 self.ui.status(_('streaming all changes\n'))
1768 self.ui.status(_('streaming all changes\n'))
1766 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1769 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1767 self.ui.status(_('%d files to transfer, %s of data\n') %
1770 self.ui.status(_('%d files to transfer, %s of data\n') %
1768 (total_files, util.bytecount(total_bytes)))
1771 (total_files, util.bytecount(total_bytes)))
1769 start = time.time()
1772 start = time.time()
1770 for i in xrange(total_files):
1773 for i in xrange(total_files):
1771 name, size = fp.readline().split('\0', 1)
1774 name, size = fp.readline().split('\0', 1)
1772 size = int(size)
1775 size = int(size)
1773 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1776 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1774 ofp = self.opener(name, 'w')
1777 ofp = self.opener(name, 'w')
1775 for chunk in util.filechunkiter(fp, limit=size):
1778 for chunk in util.filechunkiter(fp, limit=size):
1776 ofp.write(chunk)
1779 ofp.write(chunk)
1777 ofp.close()
1780 ofp.close()
1778 elapsed = time.time() - start
1781 elapsed = time.time() - start
1779 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1782 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1780 (util.bytecount(total_bytes), elapsed,
1783 (util.bytecount(total_bytes), elapsed,
1781 util.bytecount(total_bytes / elapsed)))
1784 util.bytecount(total_bytes / elapsed)))
1782 self.reload()
1785 self.reload()
1783 return len(self.heads()) + 1
1786 return len(self.heads()) + 1
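
    # Stream format consumed above (as implied by the parsing code): one
    # status line, one "<file count> <byte count>" line, then for each file
    # a "<name>\0<size>" header followed by exactly <size> bytes of raw
    # revlog data.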

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(base):
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a
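
# Illustrative sketch (assumed usage; the transaction setup lives earlier in
# this file): the closure is handed to the transaction as its after-close
# callback, something like
#
#   tr = transaction.transaction(self.ui.warn, self.opener,
#                                self.join("journal"), aftertrans(self.path))
#
# so that closing the transaction renames the journal files to undo files
# without the callback keeping a reference back to the repository object.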

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,1268 +1,1282 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno ancestor mdiff os")
16 demandload(globals(), "binascii changegroup errno ancestor mdiff os")
17 demandload(globals(), "sha struct util zlib")
17 demandload(globals(), "sha struct util zlib")
18
18
19 # revlog version strings
19 # revlog version strings
20 REVLOGV0 = 0
20 REVLOGV0 = 0
21 REVLOGNG = 1
21 REVLOGNG = 1
22
22
23 # revlog flags
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOGNGINLINEDATA = (1 << 16)
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
26
26
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
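
# For illustration: the version word stores the format in the low 16 bits
# and the flags in the high bits, so the default above works out to
#   REVLOGNG | REVLOGNGINLINEDATA == 0x00010001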

def flagstr(flag):
    if flag == "inline":
        return REVLOGNGINLINEDATA
    # interpolate after translation, so the untranslated string is the
    # one looked up in the message catalog
    raise RevlogError(_("unknown revlog flag %s") % flag)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
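
# Equivalently, as a sketch: the nodeid is
#   sha1(min(p1, p2) + max(p1, p2) + text).digest()
# so swapping the parents never changes the resulting hash.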

def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return ("", text)
    if len(text) < 44:
        if text[0] == '\0': return ("", text)
        return ('u', text)
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
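
# A round-trip sketch: compress() returns a (marker, data) pair whose
# concatenation is what gets stored, and decompress() dispatches on the
# first stored byte ('\0' for a literal chunk, 'x' for zlib output,
# 'u' for explicitly uncompressed text):
#   stored = "".join(compress(sometext))
#   assert decompress(stored) == sometext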

indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
# index ng:
#  6 bytes offset
#  2 bytes flags
#  4 bytes compressed length
#  4 bytes uncompressed length
#  4 bytes: base rev
#  4 bytes link rev
#  4 bytes parent 1 rev
#  4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">i"
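
# For illustration, the record sizes implied by the formats above:
#   struct.calcsize(indexformatv0) == 76, with the nodeid at byte 56
#   struct.calcsize(indexformatng) == 64, with the nodeid at byte 32
# In the ng format the leading Q packs the 6-byte offset and 2-byte
# flags together; see offset_type()/ngoffset()/ngtype() below.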

class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if the win32 extensions
    # are not available. it keeps the file handle open, which makes it
    # impossible to break hardlinks on locally cloned repos.
    safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
                                      hasattr(util, 'win32api'))

    def __init__(self, dataf, size, indexformat, shaoffset):
        self.dataf = dataf
        self.format = indexformat
        self.s = struct.calcsize(indexformat)
        self.indexformat = indexformat
        self.datasize = size
        self.l = size / self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.allmap = 0
        self.all = 0
        self.mapfind_count = 0
        self.shaoffset = shaoffset

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate. This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap: return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                n = data[off + self.shaoffset:off + self.shaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        if self.all: return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        for x in xrange(lend):
            if self.index[i + x] is None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[self.shaoffset:self.shaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap: return None

        # hg log will cause many many searches for the manifest
        # nodes.  After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] is not None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.rfind(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + self.shaoffset:off + self.shaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        if self.all: return
        all = False
        if i is None:
            blockstart = 0
            blocksize = (512 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~(32)) * self.s
                blocksize = self.s * 64
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all: self.all = True

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        ret = self.p.index[pos] or self.load(pos)
        if isinstance(ret, str):
            ret = struct.unpack(self.p.indexformat, ret)
        return ret
    def __setitem__(self, pos, item):
        self.p.index[pos] = item
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        n = self.p.findnode(key)
        if n is None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = struct.unpack(self.p.indexformat, ret)
            yield ret[-1]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

class RevlogError(Exception): pass

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and length of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile,
                 defversion=REVLOG_DEFAULT_VERSION):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener

        self.indexstat = None
        self.cache = None
        self.chunkcache = None
        self.defversion = defversion
        self.load()

    def load(self):
        v = self.defversion
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""
        else:
            try:
                st = util.fstat(f)
            except AttributeError, inst:
                st = None
            else:
                oldst = self.indexstat
                if (oldst and st.st_dev == oldst.st_dev
                    and st.st_ino == oldst.st_ino
                    and st.st_mtime == oldst.st_mtime
                    and st.st_ctime == oldst.st_ctime):
                    return
                self.indexstat = st
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_("index %s invalid flags %x for format v0") %
                                  (self.indexfile, flags))
        elif fmt == REVLOGNG:
            if flags & ~REVLOGNGINLINEDATA:
                raise RevlogError(_("index %s invalid flags %x for revlogng") %
                                  (self.indexfile, flags))
        else:
            raise RevlogError(_("index %s invalid format %d") %
                              (self.indexfile, fmt))
        self.version = v
        if v == REVLOGV0:
            self.indexformat = indexformatv0
            shaoffset = v0shaoffset
        else:
            self.indexformat = indexformatng
            shaoffset = ngshaoffset

        if i:
            if (lazyparser.safe_to_use and not self.inlinedata() and
                st and st.st_size > 10000):
                # big index, let's parse it on demand
                parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
                self.index = lazyindex(parser)
                self.nodemap = lazymap(parser)
            else:
                self.parseindex(f, st)
            if self.version != REVLOGV0:
                e = list(self.index[0])
                type = self.ngtype(e[0])
                e[0] = self.offset_type(0, type)
                self.index[0] = e
        else:
            self.nodemap = {nullid: -1}
            self.index = []

    def parseindex(self, fp, st):
        s = struct.calcsize(self.indexformat)
        self.index = []
        self.nodemap = {nullid: -1}
        inline = self.inlinedata()
        n = 0
        leftover = None
        while True:
            if st:
                data = fp.read(65536)
            else:
                # hack for httprangereader, it doesn't do partial reads well
                data = fp.read()
            if not data:
                break
            if n == 0 and self.inlinedata():
                # cache the first chunk
                self.chunkcache = (0, data)
            if leftover:
                data = leftover + data
                leftover = None
            off = 0
            l = len(data)
            while off < l:
                if l - off < s:
                    leftover = data[off:]
                    break
                cur = data[off:off + s]
                off += s
                e = struct.unpack(self.indexformat, cur)
                self.index.append(e)
                self.nodemap[e[-1]] = n
                n += 1
                if inline:
                    off += e[1]
                    if off > l:
                        # some things don't seek well, just read it
                        fp.read(off - l)
            if not st:
                break

    def ngoffset(self, q):
        if q & 0xFFFF:
            raise RevlogError(_('%s: incompatible revision flag %x') %
                              (self.indexfile, q))
        return long(q >> 16)

    def ngtype(self, q):
        return int(q & 0xFFFF)

    def offset_type(self, offset, type):
        return long(long(offset) << 16 | type)
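
    # Sketch of the round trip through the packed first index field:
    #   q = self.offset_type(4096, 0)   # == 4096 << 16
    #   self.ngoffset(q) == 4096L
    #   self.ngtype(q) == 0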

    def loadindex(self, start, end):
        """load a block of indexes all at once from the lazy parser"""
        if isinstance(self.index, lazyindex):
            self.index.p.loadindex(start, end)

    def loadindexmap(self):
        """loads both the map and the index from the lazy parser"""
        if isinstance(self.index, lazyindex):
            p = self.index.p
            p.loadindex()
            self.nodemap = p.map

    def loadmap(self):
        """loads the map from the lazy parser"""
        if isinstance(self.nodemap, lazymap):
            self.nodemap.p.loadmap()
            self.nodemap = self.nodemap.p.map

    def inlinedata(self): return self.version & REVLOGNGINLINEDATA
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev):
        return (rev < 0) and nullid or self.index[rev][-1]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node):
        return (node == nullid) and -1 or self.index[self.rev(node)][-4]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        r = self.rev(node)
        d = self.index[r][-3:-1]
        if self.version == REVLOGV0:
            return d
        return [ self.node(x) for x in d ]
    def parentrevs(self, rev):
        if rev == -1:
            return (-1, -1)
        d = self.index[rev][-3:-1]
        if self.version == REVLOGV0:
            return [ self.rev(x) for x in d ]
        return d
    def start(self, rev):
        if rev < 0:
            return -1
        if self.version != REVLOGV0:
            return self.ngoffset(self.index[rev][0])
        return self.index[rev][0]

    def end(self, rev): return self.start(rev) + self.length(rev)

    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = -1
        if self.version != REVLOGV0:
            l = self.index[rev][2]
            if l >= 0:
                return l

        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation. The advantage of this code is that
        # it will be faster for a single revision. But the results are
        # not cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """

    def length(self, rev):
        if rev < 0:
            return 0
        else:
            return self.index[rev][1]
    def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]

    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones
        have been pruned.  Element 0 contains a topologically sorted list
        of all nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
           roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
           heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads.keys()
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter.)
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
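
    # A usage sketch (hypothetical nodes a and b already in this revlog):
    #   self.nodesbetween()     # -> (all nodes, [nullid], current heads)
    #   self.nodesbetween(roots=[a], heads=[b])
    #       # -> (sorted nodes between a and b, surviving roots,
    #       #     heads actually reached from those roots)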

    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        """
        if start is None:
            start = nullid
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, self.count()):
            for p in parentrevs(r):
                if p in reachable:
                    reachable[r] = 1
                    heads[r] = 1
                if p in heads:
                    del heads[p]
        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except RevlogError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except TypeError:
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
                node = None
                for n in self.nodemap:
                    if n.startswith(bin_id) and hex(n).startswith(id):
                        if node is not None:
                            raise RevlogError(_("Ambiguous identifier"))
                        node = n
                if node is not None:
                    return node
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """

        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise RevlogError(_("No match found"))
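
    # Resolution order, for illustration (values hypothetical):
    #   r.lookup(5)              # rev number          -> _match
    #   r.lookup("5")            # str(rev)            -> _match
    #   r.lookup(node)           # 20-byte binary node -> _match
    #   r.lookup(hex(node))      # 40-char hex nodeid  -> _match
    #   r.lookup(hex(node)[:6])  # unambiguous prefix  -> _partialmatch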

    def cmp(self, node, text):
        """compare text with a given file revision"""
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def makenode(self, node, text):
        """calculate a file nodeid for text, descended or possibly
        unchanged from node"""

        if self.cmp(node, text):
            return hash(text, node, nullid)
        return node

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)
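
    # diff() and patches() are inverses; a sketch of the invariant
    # (a and b being arbitrary revision texts):
    #   d = self.diff(a, b)
    #   assert self.patches(a, [d]) == b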

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction") %
                              self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if not self.inlinedata():
            dfh = self.opener(self.datafile, "a")
        else:
            dfh = None
        ifh = self.opener(self.indexfile, "a+")
        return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
971
985
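    # _addrevision() and addgroup() below pack index entries with
    # self.indexformat; the tuple layout depends on the revlog version:
    #   REVLOGV0: (offset, length, base, linkrev, p1 node, p2 node, node)
    #   RevlogNG: (offset/flags, length, rawtext length, base, linkrev,
    #              p1 rev, p2 rev, node)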
    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
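        # (past that point, reading and patching the delta chain would
        # cost more than storing a fresh snapshot; the check caps the
        # work to reconstruct any revision at roughly 2x its plain size)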
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
        else:
            ifh.seek(0, 2)
            transaction.add(self.indexfile, ifh.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            ifh.write(l)
            entry = entry[4:]

        ifh.write(entry)

        if self.inlinedata():
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self.cache = (node, n, text)
        return node

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != -1]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to those nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have that parent, as it already has all history before these
        changesets. The parent used is parent[0].
        """
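        # each chunk yielded below is an 80-byte header of four 20-byte
        # nodes (node, p1, p2, changeset link) followed by the delta;
        # addgroup() unpacks exactly this layout on the receiving side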
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The
        first delta is against its parent, which should already be in
        our log; the rest are against the previous delta.
        """
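        # deltas that extend the current chain are appended as-is; when
        # the chain breaks (a delta against something other than the
        # previous rev) or grows past 2x the text size, the full text
        # is rebuilt and re-added through _addrevision() instead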

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if chain not in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self._addrevision(text, transaction, link, p1, p2, None,
                                        ifh, dfh)
                if not dfh and not self.inlinedata():
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        return node

    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
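        # index[rev][-4] is the linkrev field in both index layouts
        # (the last four fields of an entry are linkrev, p1, p2, node)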
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

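    # checksize() reports how the on-disk sizes compare with what the
    # index entries imply: dd is the count of unexpected bytes in the
    # data file, di the count in the index file; both are 0 for a
    # consistent revlog (presumably what verification relies on)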
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)