Replaced 0 with REVLOGV0 where this meaning is used.
Thomas Arendsen Hein - r2142:8a1e2a9c default
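The change is mechanical: wherever a literal 0 meant "revlog format version 0", the named constant REVLOGV0 is used instead (pulled into changelog.py via "from revlog import *", and referenced as revlog.REVLOGV0 in localrepo.py). A minimal sketch of the idea, assuming REVLOGV0 keeps the value 0 of the literal it replaces (revlog.py itself is not part of this diff):

    REVLOGV0 = 0   # defined in revlog.py; value assumed from the old default

    # before: a bare magic number
    changelog.changelog(opener, 0)
    # after: the default revlog format is named
    changelog.changelog(opener, REVLOGV0)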
@@ -1,59 +1,59 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from revlog import *
from i18n import gettext as _
from demandload import demandload
demandload(globals(), "os time util")

class changelog(revlog):
-    def __init__(self, opener, defversion=0):
+    def __init__(self, opener, defversion=REVLOGV0):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d",
                        defversion)

    def extract(self, text):
        if not text:
            return (nullid, "", (0, 0), [], "")
        last = text.index("\n\n")
        desc = text[last + 2:]
        l = text[:last].splitlines()
        manifest = bin(l[0])
        user = l[1]
        date = l[2].split(' ')
        time = float(date.pop(0))
        try:
            # various tools did silly things with the time zone field.
            timezone = int(date[0])
        except:
            timezone = 0
        files = l[3:]
        return (manifest, user, (time, timezone), files, desc)

    def read(self, node):
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        if date:
            # validate explicit (probably user-specified) date and
            # time zone offset. values must fit in signed 32 bits for
            # current 32-bit linux runtimes. timezones go from UTC-12
            # to UTC+14
            try:
                when, offset = map(int, date.split(' '))
            except ValueError:
                raise ValueError(_('invalid date: %r') % date)
            if abs(when) > 0x7fffffff:
                raise ValueError(_('date exceeds 32 bits: %d') % when)
            if offset < -50400 or offset > 43200:
                raise ValueError(_('impossible time zone offset: %d') % offset)
        else:
            date = "%d %d" % util.makedate()
        list.sort()
        l = [hex(manifest), user, date] + list + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, self.count(), p1, p2)
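For context on the hunk above: add() serializes a changeset as plain text that extract() later parses back, in the form hex manifest node, user, a "timestamp tz-offset" pair (offset in seconds, validated to the range -50400..43200), the sorted file list, a blank separator line, and the description. A round-trip sketch with invented values:

    # invented example values; the layout follows add()/extract() above
    entry = "\n".join([
        "0123456789abcdef0123456789abcdef01234567",  # hex(manifest)
        "Example User <user@example.com>",           # user
        "1139133235 -3600",                          # date: epoch seconds, tz offset
        "mercurial/changelog.py",                    # changed files, sorted
        "",                                          # blank separator
        "an invented description",                   # desc
    ])
    last = entry.index("\n\n")
    desc = entry[last + 2:]            # -> "an invented description"
    meta = entry[:last].splitlines()   # -> [manifest, user, date] + files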
@@ -1,1977 +1,1977 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import os, util
import filelog, manifest, changelog, dirstate, repo
from node import *
from i18n import gettext as _
from demandload import *
demandload(globals(), "appendfile changegroup")
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")

class localrepository(object):
    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.revlogopts
-        self.revlogversion = int(v.get('format', 0))
+        self.revlogversion = int(v.get('format', revlog.REVLOGV0))
        flags = 0
        for x in v.get('flags', "").split():
            flags |= revlog.flagstr(x)

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
    def hook(self, name, throw=False, **args):
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            r = runhook(hname, cmd) and r
        return r

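    # Illustration (not part of the original file): for a call like
    # self.hook("commit", node="abc", parent1="def"), runhook() above
    # exposes every keyword argument to the shell command twice, with
    # and without the HG_ prefix:
    #
    #     >>> args = {'node': 'abc', 'parent1': 'def'}
    #     >>> env = dict([('HG_' + k.upper(), v) for k, v in args.items()] +
    #     ...            [(k.upper(), v) for k, v in args.items()])
    #     >>> sorted(env.items())
    #     [('HG_NODE', 'abc'), ('HG_PARENT1', 'def'), ('NODE', 'abc'), ('PARENT1', 'def')]
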
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = l.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                node, key = s
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                self.tagscache[key.strip()] = bin_n

            # read each head of the tags file, ending with the tip
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            fl = self.file(".hgtags")
            h = fl.heads()
            h.reverse()
            for r in h:
                count = 0
                for l in fl.read(r).splitlines():
                    count += 1
                    parsetag(l, ".hgtags:%d" % count)

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    count += 1
                    parsetag(l, "localtags:%d" % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

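    # Illustration (not part of the original file): parsetag() above
    # expects each .hgtags or localtags line to be "<hex node> <tag>",
    # split on the first space only, so tag names may contain spaces:
    #
    #     >>> l = "0123456789abcdef0123456789abcdef01234567 my tag"
    #     >>> node, key = l.split(" ", 1)
    #     >>> key.strip()
    #     'my tag'
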
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def lookup(self, key):
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.stat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def undo(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

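    # Illustration (not part of the original file): negating the revision
    # number before sorting gives heads() its rev-descending order without
    # needing a comparison function:
    #
    #     >>> revs = [3, 11, 7]
    #     >>> pairs = [(-r, "node%d" % r) for r in revs]
    #     >>> pairs.sort()
    #     >>> [n for (r, n) in pairs]
    #     ['node11', 'node7', 'node3']
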
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

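    # Illustration (not part of the original file): for each (top, bottom)
    # pair, between() walks the first-parent chain from top and keeps the
    # nodes at exponentially growing distances 1, 2, 4, 8, ...; findincoming()
    # below feeds these samples from the remote into its binary search for
    # the first unknown changeset. Sketch on a linear history 9 -> 8 -> ... -> 0
    # with top=9, bottom=0:
    #
    #     >>> parent = lambda n: n - 1   # stand-in for changelog.parents(n)[0]
    #     >>> n, l, i, f = 9, [], 0, 1
    #     >>> while n != 0:
    #     ...     if i == f:
    #     ...         l.append(n)
    #     ...         f = f * 2
    #     ...     n = parent(n)
    #     ...     i += 1
    #     >>> l   # nodes at distances 1, 2, 4, 8 from the top
    #     [8, 7, 5, 1]
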
825 def findincoming(self, remote, base=None, heads=None, force=False):
825 def findincoming(self, remote, base=None, heads=None, force=False):
826 m = self.changelog.nodemap
826 m = self.changelog.nodemap
827 search = []
827 search = []
828 fetch = {}
828 fetch = {}
829 seen = {}
829 seen = {}
830 seenbranch = {}
830 seenbranch = {}
831 if base == None:
831 if base == None:
832 base = {}
832 base = {}
833
833
834 if not heads:
834 if not heads:
835 heads = remote.heads()
835 heads = remote.heads()
836
836
837 if self.changelog.tip() == nullid:
837 if self.changelog.tip() == nullid:
838 if heads != [nullid]:
838 if heads != [nullid]:
839 return [nullid]
839 return [nullid]
840 return []
840 return []
841
841
842 # assume we're closer to the tip than the root
842 # assume we're closer to the tip than the root
843 # and start by examining the heads
843 # and start by examining the heads
844 self.ui.status(_("searching for changes\n"))
844 self.ui.status(_("searching for changes\n"))
845
845
846 unknown = []
846 unknown = []
847 for h in heads:
847 for h in heads:
848 if h not in m:
848 if h not in m:
849 unknown.append(h)
849 unknown.append(h)
850 else:
850 else:
851 base[h] = 1
851 base[h] = 1
852
852
853 if not unknown:
853 if not unknown:
854 return []
854 return []
855
855
856 rep = {}
856 rep = {}
857 reqcnt = 0
857 reqcnt = 0
858
858
859 # search through remote branches
859 # search through remote branches
860 # a 'branch' here is a linear segment of history, with four parts:
860 # a 'branch' here is a linear segment of history, with four parts:
861 # head, root, first parent, second parent
861 # head, root, first parent, second parent
862 # (a branch always has two parents (or none) by definition)
862 # (a branch always has two parents (or none) by definition)
863 unknown = remote.branches(unknown)
863 unknown = remote.branches(unknown)
864 while unknown:
864 while unknown:
865 r = []
865 r = []
866 while unknown:
866 while unknown:
867 n = unknown.pop(0)
867 n = unknown.pop(0)
868 if n[0] in seen:
868 if n[0] in seen:
869 continue
869 continue
870
870
871 self.ui.debug(_("examining %s:%s\n")
871 self.ui.debug(_("examining %s:%s\n")
872 % (short(n[0]), short(n[1])))
872 % (short(n[0]), short(n[1])))
873 if n[0] == nullid:
873 if n[0] == nullid:
874 break
874 break
875 if n in seenbranch:
875 if n in seenbranch:
876 self.ui.debug(_("branch already found\n"))
876 self.ui.debug(_("branch already found\n"))
877 continue
877 continue
878 if n[1] and n[1] in m: # do we know the base?
878 if n[1] and n[1] in m: # do we know the base?
879 self.ui.debug(_("found incomplete branch %s:%s\n")
879 self.ui.debug(_("found incomplete branch %s:%s\n")
880 % (short(n[0]), short(n[1])))
880 % (short(n[0]), short(n[1])))
881 search.append(n) # schedule branch range for scanning
881 search.append(n) # schedule branch range for scanning
882 seenbranch[n] = 1
882 seenbranch[n] = 1
883 else:
883 else:
884 if n[1] not in seen and n[1] not in fetch:
884 if n[1] not in seen and n[1] not in fetch:
885 if n[2] in m and n[3] in m:
885 if n[2] in m and n[3] in m:
886 self.ui.debug(_("found new changeset %s\n") %
886 self.ui.debug(_("found new changeset %s\n") %
887 short(n[1]))
887 short(n[1]))
888 fetch[n[1]] = 1 # earliest unknown
888 fetch[n[1]] = 1 # earliest unknown
889 base[n[2]] = 1 # latest known
889 base[n[2]] = 1 # latest known
890 continue
890 continue
891
891
892 for a in n[2:4]:
892 for a in n[2:4]:
893 if a not in rep:
893 if a not in rep:
894 r.append(a)
894 r.append(a)
895 rep[a] = 1
895 rep[a] = 1
896
896
897 seen[n[0]] = 1
897 seen[n[0]] = 1
898
898
899 if r:
899 if r:
900 reqcnt += 1
900 reqcnt += 1
901 self.ui.debug(_("request %d: %s\n") %
901 self.ui.debug(_("request %d: %s\n") %
902 (reqcnt, " ".join(map(short, r))))
902 (reqcnt, " ".join(map(short, r))))
903 for p in range(0, len(r), 10):
903 for p in range(0, len(r), 10):
904 for b in remote.branches(r[p:p+10]):
904 for b in remote.branches(r[p:p+10]):
905 self.ui.debug(_("received %s:%s\n") %
905 self.ui.debug(_("received %s:%s\n") %
906 (short(b[0]), short(b[1])))
906 (short(b[0]), short(b[1])))
907 if b[0] in m:
907 if b[0] in m:
908 self.ui.debug(_("found base node %s\n")
908 self.ui.debug(_("found base node %s\n")
909 % short(b[0]))
909 % short(b[0]))
910 base[b[0]] = 1
910 base[b[0]] = 1
911 elif b[0] not in seen:
911 elif b[0] not in seen:
912 unknown.append(b)
912 unknown.append(b)
913
913
914 # do binary search on the branches we found
914 # do binary search on the branches we found
915 while search:
915 while search:
916 n = search.pop(0)
916 n = search.pop(0)
917 reqcnt += 1
917 reqcnt += 1
918 l = remote.between([(n[0], n[1])])[0]
918 l = remote.between([(n[0], n[1])])[0]
919 l.append(n[1])
919 l.append(n[1])
920 p = n[0]
920 p = n[0]
921 f = 1
921 f = 1
922 for i in l:
922 for i in l:
923 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
923 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
924 if i in m:
924 if i in m:
925 if f <= 2:
925 if f <= 2:
926 self.ui.debug(_("found new branch changeset %s\n") %
926 self.ui.debug(_("found new branch changeset %s\n") %
927 short(p))
927 short(p))
928 fetch[p] = 1
928 fetch[p] = 1
929 base[i] = 1
929 base[i] = 1
930 else:
930 else:
931 self.ui.debug(_("narrowed branch search to %s:%s\n")
931 self.ui.debug(_("narrowed branch search to %s:%s\n")
932 % (short(p), short(i)))
932 % (short(p), short(i)))
933 search.append((p, i))
933 search.append((p, i))
934 break
934 break
935 p, f = i, f * 2
935 p, f = i, f * 2
936
936
937 # sanity check our fetch list
937 # sanity check our fetch list
938 for f in fetch.keys():
938 for f in fetch.keys():
939 if f in m:
939 if f in m:
940 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
940 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
941
941
942 if base.keys() == [nullid]:
942 if base.keys() == [nullid]:
943 if force:
943 if force:
944 self.ui.warn(_("warning: repository is unrelated\n"))
944 self.ui.warn(_("warning: repository is unrelated\n"))
945 else:
945 else:
946 raise util.Abort(_("repository is unrelated"))
946 raise util.Abort(_("repository is unrelated"))
947
947
948 self.ui.note(_("found new changesets starting at ") +
948 self.ui.note(_("found new changesets starting at ") +
949 " ".join([short(f) for f in fetch]) + "\n")
949 " ".join([short(f) for f in fetch]) + "\n")
950
950
951 self.ui.debug(_("%d total queries\n") % reqcnt)
951 self.ui.debug(_("%d total queries\n") % reqcnt)
952
952
953 return fetch.keys()
953 return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

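    # Illustration (hypothetical, not part of the original source): the
    # pruning walk above deletes everything the remote is known to have,
    # ancestors included, from 'remain'; whatever is left with no surviving
    # parent is a root of an outgoing subset.  A toy run on the chain
    # a -> b -> c where the remote already has 'a':
    #
    #   >>> parents = {'a': [], 'b': ['a'], 'c': ['b']}
    #   >>> remain = {'a': None, 'b': None, 'c': None}
    #   >>> remove = ['a']
    #   >>> while remove:
    #   ...     n = remove.pop(0)
    #   ...     if n in remain:
    #   ...         del remain[n]
    #   ...         remove.extend(parents[n])
    #   >>> [n for n in remain if not [p for p in parents[n] if p in remain]]
    #   ['b']
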
    def pull(self, remote, heads=None, force=False):
        l = self.lock()

        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)

    def push(self, remote, force=False, revs=None):
        lock = remote.lock()

        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

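        # Illustration (hypothetical, not part of the original source): on a
        # linear history 0 -> 1 -> 2 -> 3 -> 4 with bases=[1] and heads=[3],
        # the changegroup is the revisions that are both descendants of a
        # base and ancestors of a head:
        #
        #   >>> [r for r in range(5) if 1 <= r <= 3]
        #   [1, 2, 3]
        #
        # nodesbetween() computes the same thing on the real revision DAG,
        # where "descendant" and "ancestor" follow parent links rather than
        # integer order.
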
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

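        # Illustration (hypothetical, not part of the original source): the
        # closure built by cmp_by_rev_func above is meant for list.sort(),
        # which here takes an old-style comparison function.  With a
        # stand-in rev() implemented as a dict lookup:
        #
        #   >>> revnums = {'n2': 2, 'n0': 0, 'n1': 1}
        #   >>> nodes = ['n2', 'n0', 'n1']
        #   >>> nodes.sort(lambda a, b: cmp(revnums[a], revnums[b]))
        #   >>> nodes
        #   ['n0', 'n1', 'n2']
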
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

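        # Illustration (hypothetical, not part of the original source):
        # prune_parents above closes the 'has' set over ancestors, then
        # drops every member of it from the missing set.  On the chain
        # a -> b -> c with 'c' known to be present:
        #
        #   >>> parents = {'a': [], 'b': ['a'], 'c': ['b']}
        #   >>> hasset = {'c': 1}
        #   >>> msngset = {'a': 1, 'b': 1, 'c': 1}
        #   >>> stack = list(parents['c'])
        #   >>> while stack:
        #   ...     n = stack.pop()
        #   ...     if n not in hasset:
        #   ...         hasset[n] = 1
        #   ...         stack.extend(parents[n])
        #   >>> for n in hasset:
        #   ...     junk = msngset.pop(n, None)
        #   >>> msngset
        #   {}
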
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode any given manifest was
            # referenced by, so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

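        # Illustration (hypothetical, not part of the original source):
        # mapping each filename to itself with setdefault(), as
        # collect_manifests_and_files does above, means later duplicates
        # resolve to the first string object seen for that name:
        #
        #   >>> changedfileset = {}
        #   >>> a = 'very/long/file/name'
        #   >>> b = 'very/long/file' + '/name'
        #   >>> changedfileset.setdefault(a, a) is a
        #   True
        #   >>> changedfileset.setdefault(b, b) is a
        #   True
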
        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the same
            # changenode as the first manifest that references it.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

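        # Illustration (hypothetical, not part of the original source): a
        # manifest line (and so each line of a manifest delta) is a filename
        # and a 40-character hex filenode separated by a NUL byte, possibly
        # followed by a flag character -- which is why the code above splits
        # on '\0' and truncates the hash to 40 characters:
        #
        #   >>> dline = 'src/foo.py\x00' + 'ab' * 20 + 'x'
        #   >>> f, fnode = dline.split('\0')
        #   >>> f
        #   'src/foo.py'
        #   >>> fnode[:40] == 'ab' * 20
        #   True
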
        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

1261
1261
1262 # Now that we have all theses utility functions to help out and
1262 # Now that we have all theses utility functions to help out and
1263 # logically divide up the task, generate the group.
1263 # logically divide up the task, generate the group.
1264 def gengroup():
1264 def gengroup():
1265 # The set of changed files starts empty.
1265 # The set of changed files starts empty.
1266 changedfiles = {}
1266 changedfiles = {}
1267 # Create a changenode group generator that will call our functions
1267 # Create a changenode group generator that will call our functions
1268 # back to lookup the owning changenode and collect information.
1268 # back to lookup the owning changenode and collect information.
1269 group = cl.group(msng_cl_lst, identity,
1269 group = cl.group(msng_cl_lst, identity,
1270 manifest_and_file_collector(changedfiles))
1270 manifest_and_file_collector(changedfiles))
1271 for chnk in group:
1271 for chnk in group:
1272 yield chnk
1272 yield chnk
1273
1273
1274 # The list of manifests has been collected by the generator
1274 # The list of manifests has been collected by the generator
1275 # calling our functions back.
1275 # calling our functions back.
1276 prune_manifests()
1276 prune_manifests()
1277 msng_mnfst_lst = msng_mnfst_set.keys()
1277 msng_mnfst_lst = msng_mnfst_set.keys()
1278 # Sort the manifestnodes by revision number.
1278 # Sort the manifestnodes by revision number.
1279 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1279 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1280 # Create a generator for the manifestnodes that calls our lookup
1280 # Create a generator for the manifestnodes that calls our lookup
1281 # and data collection functions back.
1281 # and data collection functions back.
1282 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1282 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1283 filenode_collector(changedfiles))
1283 filenode_collector(changedfiles))
1284 for chnk in group:
1284 for chnk in group:
1285 yield chnk
1285 yield chnk
1286
1286
1287 # These are no longer needed, dereference and toss the memory for
1287 # These are no longer needed, dereference and toss the memory for
1288 # them.
1288 # them.
1289 msng_mnfst_lst = None
1289 msng_mnfst_lst = None
1290 msng_mnfst_set.clear()
1290 msng_mnfst_set.clear()
1291
1291
1292 changedfiles = changedfiles.keys()
1292 changedfiles = changedfiles.keys()
1293 changedfiles.sort()
1293 changedfiles.sort()
1294 # Go through all our files in order sorted by name.
1294 # Go through all our files in order sorted by name.
1295 for fname in changedfiles:
1295 for fname in changedfiles:
1296 filerevlog = self.file(fname)
1296 filerevlog = self.file(fname)
1297 # Toss out the filenodes that the recipient isn't really
1297 # Toss out the filenodes that the recipient isn't really
1298 # missing.
1298 # missing.
1299 if msng_filenode_set.has_key(fname):
1299 if msng_filenode_set.has_key(fname):
1300 prune_filenodes(fname, filerevlog)
1300 prune_filenodes(fname, filerevlog)
1301 msng_filenode_lst = msng_filenode_set[fname].keys()
1301 msng_filenode_lst = msng_filenode_set[fname].keys()
1302 else:
1302 else:
1303 msng_filenode_lst = []
1303 msng_filenode_lst = []
1304 # If any filenodes are left, generate the group for them,
1304 # If any filenodes are left, generate the group for them,
1305 # otherwise don't bother.
1305 # otherwise don't bother.
1306 if len(msng_filenode_lst) > 0:
1306 if len(msng_filenode_lst) > 0:
1307 yield changegroup.genchunk(fname)
1307 yield changegroup.genchunk(fname)
1308 # Sort the filenodes by their revision #
1308 # Sort the filenodes by their revision #
1309 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1309 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1310 # Create a group generator and only pass in a changenode
1310 # Create a group generator and only pass in a changenode
1311 # lookup function as we need to collect no information
1311 # lookup function as we need to collect no information
1312 # from filenodes.
1312 # from filenodes.
1313 group = filerevlog.group(msng_filenode_lst,
1313 group = filerevlog.group(msng_filenode_lst,
1314 lookup_filenode_link_func(fname))
1314 lookup_filenode_link_func(fname))
1315 for chnk in group:
1315 for chnk in group:
1316 yield chnk
1316 yield chnk
1317 if msng_filenode_set.has_key(fname):
1317 if msng_filenode_set.has_key(fname):
1318 # Don't need this anymore, toss it to free memory.
1318 # Don't need this anymore, toss it to free memory.
1319 del msng_filenode_set[fname]
1319 del msng_filenode_set[fname]
1320 # Signal that no more groups are left.
1320 # Signal that no more groups are left.
1321 yield changegroup.closechunk()
1321 yield changegroup.closechunk()
1322
1322
1323 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1323 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1324
1324
1325 return util.chunkbuffer(gengroup())
1325 return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

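        # Illustration (hypothetical, not part of the original source):
        # gennodelst walks a revlog in revision order and keeps only nodes
        # whose linkrev points at one of the outgoing changeset revisions.
        # With a stand-in linkrev table:
        #
        #   >>> linkrevs = {'n0': 0, 'n1': 1, 'n2': 2, 'n3': 3}
        #   >>> revset = {1: None, 2: None}
        #   >>> [n for n in ['n0', 'n1', 'n2', 'n3'] if linkrevs[n] in revset]
        #   ['n1', 'n2']
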
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = appendfile.appendchangelog(self.opener, self.changelog.version)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener, self.manifest.version)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see
        # consistent view of repo
        mf.writedata()
        cl.writedata()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

        return newheads - oldheads + 1

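    # Illustration (hypothetical, not part of the original source): per the
    # docstring, addchangegroup() returns the number of heads modified or
    # added plus one, so a pull that adds changesets without creating a new
    # head still returns a true value:
    #
    #   >>> oldheads, newheads = 1, 1
    #   >>> newheads - oldheads + 1
    #   1
    #   >>> oldheads, newheads = 1, 3
    #   >>> newheads - oldheads + 1
    #   3
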
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

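        # Illustration (hypothetical, not part of the original source): when
        # the common ancestor of the two revisions is one of them, one is an
        # ancestor of the other and the update is a plain jump; otherwise it
        # is a real merge.  With a toy ancestor() for a tree where 'b'
        # branches into 'c' and 'd':
        #
        #   >>> def ancestor(x, y):
        #   ...     anc = {('b', 'c'): 'b', ('c', 'd'): 'b'}
        #   ...     return anc.get((x, y)) or anc.get((y, x)) or x
        #   >>> pa = ancestor('b', 'c')
        #   >>> pa == 'b' or pa == 'c'    # linear path: just a jump
        #   True
        #   >>> pa = ancestor('c', 'd')
        #   >>> pa == 'c' or pa == 'd'    # no linear path: a merge
        #   False
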
        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
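
            # Illustration (hypothetical, not part of the original source):
            # the executable-bit rule "if we changed or they changed, change
            # in merge" is the bitwise expression used above.  With a the
            # ancestor's bit, b ours and c theirs, the result deviates from
            # a exactly when b or c does:
            #
            #   >>> def mergebit(a, b, c):
            #   ...     return ((a^b) | (a^c)) ^ a
            #   >>> mergebit(0, 0, 0), mergebit(0, 1, 0), mergebit(0, 0, 1)
            #   (0, 1, 1)
            #   >>> mergebit(1, 1, 0), mergebit(1, 0, 1), mergebit(1, 1, 1)
            #   (0, 0, 1)
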
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        stat = ((len(get), _("updated")),
                (len(merge) - len(failedmerge), _("merged")),
                (len(remove), _("removed")),
                (len(failedmerge), _("unresolved")))
        note = ", ".join([_("%d files %s") % s for s in stat])
        self.ui.note("%s\n" % note)
        if moddirstate and branch_merge:
            self.ui.note(_("(branch merge, don't forget to commit)\n"))

        return err

    def merge3(self, fn, my, other, p1, p2):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

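        # Illustration (hypothetical, not part of the original source): the
        # temporary copies handed to the merge tool are named after the file
        # being merged plus a role prefix; tempfile.mkstemp("", pre) then
        # appends its random suffix:
        #
        #   >>> import os.path
        #   >>> "%s~%s." % (os.path.basename("src/foo.py"), "base")
        #   'foo.py~base.'
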
        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
                        environ={'HG_FILE': fn,
                                 'HG_MY_NODE': p1,
                                 'HG_OTHER_NODE': p2,
                                 'HG_FILE_MY_NODE': hex(my),
                                 'HG_FILE_OTHER_NODE': hex(other),
                                 'HG_FILE_BASE_NODE': hex(base)})
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        os.unlink(b)
        os.unlink(c)
        return r

1801 def verify(self):
1801 def verify(self):
1802 filelinkrevs = {}
1802 filelinkrevs = {}
1803 filenodes = {}
1803 filenodes = {}
1804 changesets = revisions = files = 0
1804 changesets = revisions = files = 0
1805 errors = [0]
1805 errors = [0]
1806 neededmanifests = {}
1806 neededmanifests = {}
1807
1807
1808 def err(msg):
1808 def err(msg):
1809 self.ui.warn(msg + "\n")
1809 self.ui.warn(msg + "\n")
1810 errors[0] += 1
1810 errors[0] += 1
1811
1811
1812 def checksize(obj, name):
1812 def checksize(obj, name):
1813 d = obj.checksize()
1813 d = obj.checksize()
1814 if d[0]:
1814 if d[0]:
1815 err(_("%s data length off by %d bytes") % (name, d[0]))
1815 err(_("%s data length off by %d bytes") % (name, d[0]))
1816 if d[1]:
1816 if d[1]:
1817 err(_("%s index contains %d extra bytes") % (name, d[1]))
1817 err(_("%s index contains %d extra bytes") % (name, d[1]))
1818
1818
1819 seen = {}
1819 seen = {}
1820 self.ui.status(_("checking changesets\n"))
1820 self.ui.status(_("checking changesets\n"))
1821 checksize(self.changelog, "changelog")
1821 checksize(self.changelog, "changelog")
1822
1822
1823 for i in range(self.changelog.count()):
1823 for i in range(self.changelog.count()):
1824 changesets += 1
1824 changesets += 1
1825 n = self.changelog.node(i)
1825 n = self.changelog.node(i)
1826 l = self.changelog.linkrev(n)
1826 l = self.changelog.linkrev(n)
1827 if l != i:
1827 if l != i:
1828 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1828 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1829 if n in seen:
1829 if n in seen:
1830 err(_("duplicate changeset at revision %d") % i)
1830 err(_("duplicate changeset at revision %d") % i)
1831 seen[n] = 1
1831 seen[n] = 1
1832
1832
1833 for p in self.changelog.parents(n):
1833 for p in self.changelog.parents(n):
1834 if p not in self.changelog.nodemap:
1834 if p not in self.changelog.nodemap:
1835 err(_("changeset %s has unknown parent %s") %
1835 err(_("changeset %s has unknown parent %s") %
1836 (short(n), short(p)))
1836 (short(n), short(p)))
1837 try:
1837 try:
1838 changes = self.changelog.read(n)
1838 changes = self.changelog.read(n)
1839 except KeyboardInterrupt:
1839 except KeyboardInterrupt:
1840 self.ui.warn(_("interrupted"))
1840 self.ui.warn(_("interrupted"))
1841 raise
1841 raise
1842 except Exception, inst:
1842 except Exception, inst:
1843 err(_("unpacking changeset %s: %s") % (short(n), inst))
1843 err(_("unpacking changeset %s: %s") % (short(n), inst))
1844 continue
1844 continue
1845
1845
1846 neededmanifests[changes[0]] = n
1846 neededmanifests[changes[0]] = n
1847
1847
1848 for f in changes[3]:
1848 for f in changes[3]:
1849 filelinkrevs.setdefault(f, []).append(i)
1849 filelinkrevs.setdefault(f, []).append(i)
1850
1850
1851 seen = {}
1851 seen = {}
1852 self.ui.status(_("checking manifests\n"))
1852 self.ui.status(_("checking manifests\n"))
1853 checksize(self.manifest, "manifest")
1853 checksize(self.manifest, "manifest")
1854
1854
1855 for i in range(self.manifest.count()):
1855 for i in range(self.manifest.count()):
1856 n = self.manifest.node(i)
1856 n = self.manifest.node(i)
1857 l = self.manifest.linkrev(n)
1857 l = self.manifest.linkrev(n)
1858
1858
1859 if l < 0 or l >= self.changelog.count():
1859 if l < 0 or l >= self.changelog.count():
1860 err(_("bad manifest link (%d) at revision %d") % (l, i))
1860 err(_("bad manifest link (%d) at revision %d") % (l, i))
1861
1861
1862 if n in neededmanifests:
1862 if n in neededmanifests:
1863 del neededmanifests[n]
1863 del neededmanifests[n]
1864
1864
1865 if n in seen:
1865 if n in seen:
1866 err(_("duplicate manifest at revision %d") % i)
1866 err(_("duplicate manifest at revision %d") % i)
1867
1867
1868 seen[n] = 1
1868 seen[n] = 1
1869
1869
1870 for p in self.manifest.parents(n):
1870 for p in self.manifest.parents(n):
1871 if p not in self.manifest.nodemap:
1871 if p not in self.manifest.nodemap:
1872 err(_("manifest %s has unknown parent %s") %
1872 err(_("manifest %s has unknown parent %s") %
1873 (short(n), short(p)))
1873 (short(n), short(p)))
1874
1874
1875 try:
1875 try:
1876 delta = mdiff.patchtext(self.manifest.delta(n))
1876 delta = mdiff.patchtext(self.manifest.delta(n))
1877 except KeyboardInterrupt:
1877 except KeyboardInterrupt:
1878 self.ui.warn(_("interrupted"))
1878 self.ui.warn(_("interrupted"))
1879 raise
1879 raise
1880 except Exception, inst:
1880 except Exception, inst:
1881 err(_("unpacking manifest %s: %s") % (short(n), inst))
1881 err(_("unpacking manifest %s: %s") % (short(n), inst))
1882 continue
1882 continue
1883
1883
1884 try:
1884 try:
1885 ff = [ l.split('\0') for l in delta.splitlines() ]
1885 ff = [ l.split('\0') for l in delta.splitlines() ]
1886 for f, fn in ff:
1886 for f, fn in ff:
1887 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1887 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1888 except (ValueError, TypeError), inst:
1888 except (ValueError, TypeError), inst:
1889 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1889 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1890
1890
1891 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1891 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1892
1892
1893 for m, c in neededmanifests.items():
1893 for m, c in neededmanifests.items():
1894 err(_("Changeset %s refers to unknown manifest %s") %
1894 err(_("Changeset %s refers to unknown manifest %s") %
1895 (short(m), short(c)))
1895 (short(m), short(c)))
1896 del neededmanifests
1896 del neededmanifests
1897
1897
1898 for f in filenodes:
1898 for f in filenodes:
1899 if f not in filelinkrevs:
1899 if f not in filelinkrevs:
1900 err(_("file %s in manifest but not in changesets") % f)
1900 err(_("file %s in manifest but not in changesets") % f)
1901
1901
1902 for f in filelinkrevs:
1902 for f in filelinkrevs:
1903 if f not in filenodes:
1903 if f not in filenodes:
1904 err(_("file %s in changeset but not in manifest") % f)
1904 err(_("file %s in changeset but not in manifest") % f)
1905
1905
1906 self.ui.status(_("checking files\n"))
1906 self.ui.status(_("checking files\n"))
1907 ff = filenodes.keys()
1907 ff = filenodes.keys()
1908 ff.sort()
1908 ff.sort()
1909 for f in ff:
1909 for f in ff:
1910 if f == "/dev/null":
1910 if f == "/dev/null":
1911 continue
1911 continue
1912 files += 1
1912 files += 1
1913 if not f:
1913 if not f:
1914 err(_("file without name in manifest %s") % short(n))
1914 err(_("file without name in manifest %s") % short(n))
1915 continue
1915 continue
1916 fl = self.file(f)
1916 fl = self.file(f)
1917 checksize(fl, f)
1917 checksize(fl, f)
1918
1918
1919 nodes = {nullid: 1}
1919 nodes = {nullid: 1}
1920 seen = {}
1920 seen = {}
1921 for i in range(fl.count()):
1921 for i in range(fl.count()):
1922 revisions += 1
1922 revisions += 1
1923 n = fl.node(i)
1923 n = fl.node(i)
1924
1924
1925 if n in seen:
1925 if n in seen:
1926 err(_("%s: duplicate revision %d") % (f, i))
1926 err(_("%s: duplicate revision %d") % (f, i))
1927 if n not in filenodes[f]:
1927 if n not in filenodes[f]:
1928 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1928 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1929 else:
1929 else:
1930 del filenodes[f][n]
1930 del filenodes[f][n]
1931
1931
1932 flr = fl.linkrev(n)
1932 flr = fl.linkrev(n)
1933 if flr not in filelinkrevs.get(f, []):
1933 if flr not in filelinkrevs.get(f, []):
1934 err(_("%s:%s points to unexpected changeset %d")
1934 err(_("%s:%s points to unexpected changeset %d")
1935 % (f, short(n), flr))
1935 % (f, short(n), flr))
1936 else:
1936 else:
1937 filelinkrevs[f].remove(flr)
1937 filelinkrevs[f].remove(flr)
1938
1938
1939 # verify contents
1939 # verify contents
1940 try:
1940 try:
1941 t = fl.read(n)
1941 t = fl.read(n)
1942 except KeyboardInterrupt:
1942 except KeyboardInterrupt:
1943 self.ui.warn(_("interrupted"))
1943 self.ui.warn(_("interrupted"))
1944 raise
1944 raise
1945 except Exception, inst:
1945 except Exception, inst:
1946 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1946 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1947
1947
1948 # verify parents
1948 # verify parents
1949 (p1, p2) = fl.parents(n)
1949 (p1, p2) = fl.parents(n)
1950 if p1 not in nodes:
1950 if p1 not in nodes:
1951 err(_("file %s:%s unknown parent 1 %s") %
1951 err(_("file %s:%s unknown parent 1 %s") %
1952 (f, short(n), short(p1)))
1952 (f, short(n), short(p1)))
1953 if p2 not in nodes:
1953 if p2 not in nodes:
1954 err(_("file %s:%s unknown parent 2 %s") %
1954 err(_("file %s:%s unknown parent 2 %s") %
1955 (f, short(n), short(p2)))
1955 (f, short(n), short(p2)))
1956 nodes[n] = 1
1956 nodes[n] = 1
1957
1957
1958 # cross-check
1958 # cross-check
1959 for node in filenodes[f]:
1959 for node in filenodes[f]:
1960 err(_("node %s in manifests not in %s") % (hex(node), f))
1960 err(_("node %s in manifests not in %s") % (hex(node), f))
1961
1961
1962 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1962 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1963 (files, changesets, revisions))
1963 (files, changesets, revisions))
1964
1964
1965 if errors[0]:
1965 if errors[0]:
1966 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1966 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1967 return 1
1967 return 1
1968
1968
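
verify() builds filelinkrevs from the changelog and filenodes from the manifest deltas, then checks the two against each other and against every filelog. A toy illustration of the two-way cross-check, with plain dicts and made-up data standing in for the revlogs:

    # Toy version of verify()'s changeset/manifest cross-check (made-up data).
    filelinkrevs = {"a.txt": [0, 2]}                      # files named by changesets
    filenodes = {"a.txt": {"n1": 1}, "b.txt": {"n2": 1}}  # files named by manifests

    for f in filenodes:
        if f not in filelinkrevs:
            print("file %s in manifest but not in changesets" % f)
    for f in filelinkrevs:
        if f not in filenodes:
            print("file %s in changeset but not in manifest" % f)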
1969 # used to avoid circular references so destructors work
1969 # used to avoid circular references so destructors work
1970 def aftertrans(base):
1970 def aftertrans(base):
1971 p = base
1971 p = base
1972 def a():
1972 def a():
1973 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1973 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1974 util.rename(os.path.join(p, "journal.dirstate"),
1974 util.rename(os.path.join(p, "journal.dirstate"),
1975 os.path.join(p, "undo.dirstate"))
1975 os.path.join(p, "undo.dirstate"))
1976 return a
1976 return a
1977
1977
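
aftertrans returns a closure that captures only the base path, so the transaction holding the callback keeps no reference back to the repository object and destructors can still run. The same pattern in isolation, simplified to a single rename and using a made-up path:

    # Minimal sketch of the closure pattern: the callback closes over a string,
    # not the repository, so no reference cycle is created.
    import os

    def aftertrans(base):
        def a():
            os.rename(os.path.join(base, "journal"), os.path.join(base, "undo"))
        return a

    cb = aftertrans("/tmp/repo/.hg")  # hypothetical path
    # cb() performs the rename once the transaction completes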
@@ -1,176 +1,176
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct
8 import struct
9 from revlog import *
9 from revlog import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "bisect array")
12 demandload(globals(), "bisect array")
13
13
14 class manifest(revlog):
14 class manifest(revlog):
15 def __init__(self, opener, defversion=0):
15 def __init__(self, opener, defversion=REVLOGV0):
16 self.mapcache = None
16 self.mapcache = None
17 self.listcache = None
17 self.listcache = None
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
19 defversion)
19 defversion)
20
20
21 def read(self, node):
21 def read(self, node):
22 if node == nullid: return {} # don't upset local cache
22 if node == nullid: return {} # don't upset local cache
23 if self.mapcache and self.mapcache[0] == node:
23 if self.mapcache and self.mapcache[0] == node:
24 return self.mapcache[1]
24 return self.mapcache[1]
25 text = self.revision(node)
25 text = self.revision(node)
26 map = {}
26 map = {}
27 flag = {}
27 flag = {}
28 self.listcache = array.array('c', text)
28 self.listcache = array.array('c', text)
29 lines = text.splitlines(1)
29 lines = text.splitlines(1)
30 for l in lines:
30 for l in lines:
31 (f, n) = l.split('\0')
31 (f, n) = l.split('\0')
32 map[f] = bin(n[:40])
32 map[f] = bin(n[:40])
33 flag[f] = (n[40:-1] == "x")
33 flag[f] = (n[40:-1] == "x")
34 self.mapcache = (node, map, flag)
34 self.mapcache = (node, map, flag)
35 return map
35 return map
36
36
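
Each manifest line has the form "filename\0<40 hex digits of node>[x]\n"; read() recovers the binary node from the 40 hex digits and the executable flag from a trailing "x". Parsing one such line with toy data:

    # Parse one manifest line of the form "name\0<40-hex-node>[x]\n" (toy data).
    import binascii

    line = "foo/bar.py\0" + "ab" * 20 + "x\n"  # made-up entry
    f, n = line.split("\0")
    node = binascii.unhexlify(n[:40])          # bin(n[:40]) in the code above
    is_exec = (n[40:-1] == "x")                # flag sits between node and newline
    print(f, is_exec)                          # -> foo/bar.py True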
37 def readflags(self, node):
37 def readflags(self, node):
38 if node == nullid: return {} # don't upset local cache
38 if node == nullid: return {} # don't upset local cache
39 if not self.mapcache or self.mapcache[0] != node:
39 if not self.mapcache or self.mapcache[0] != node:
40 self.read(node)
40 self.read(node)
41 return self.mapcache[2]
41 return self.mapcache[2]
42
42
43 def diff(self, a, b):
43 def diff(self, a, b):
44 return mdiff.textdiff(str(a), str(b))
44 return mdiff.textdiff(str(a), str(b))
45
45
46 def add(self, map, flags, transaction, link, p1=None, p2=None,
46 def add(self, map, flags, transaction, link, p1=None, p2=None,
47 changed=None):
47 changed=None):
48
48
49 # returns a tuple (start, end). If the string is found
49 # returns a tuple (start, end). If the string is found,
49 # returns a tuple (start, end). If the string is found,
50 # m[start:end] is the line containing that string. If start == end
50 # m[start:end] is the line containing that string. If start == end
51 # the string was not found and they indicate the proper sorted
52 # insertion point. This was taken from bisect_left, and modified
52 # insertion point. This was taken from bisect_left, and modified
53 # to find line start/end as it goes along.
53 # to find line start/end as it goes along.
54 #
54 #
55 # m should be a buffer or a string
55 # m should be a buffer or a string
56 # s is a string
56 # s is a string
57 #
57 #
58 def manifestsearch(m, s, lo=0, hi=None):
58 def manifestsearch(m, s, lo=0, hi=None):
59 def advance(i, c):
59 def advance(i, c):
60 while i < lenm and m[i] != c:
60 while i < lenm and m[i] != c:
61 i += 1
61 i += 1
62 return i
62 return i
63 lenm = len(m)
63 lenm = len(m)
64 if not hi:
64 if not hi:
65 hi = lenm
65 hi = lenm
66 while lo < hi:
66 while lo < hi:
67 mid = (lo + hi) // 2
67 mid = (lo + hi) // 2
68 start = mid
68 start = mid
69 while start > 0 and m[start-1] != '\n':
69 while start > 0 and m[start-1] != '\n':
70 start -= 1
70 start -= 1
71 end = advance(start, '\0')
71 end = advance(start, '\0')
72 if m[start:end] < s:
72 if m[start:end] < s:
73 # we know that after the null there are 40 bytes of sha1
73 # we know that after the null there are 40 bytes of sha1
74 # this translates to the bisect lo = mid + 1
74 # this translates to the bisect lo = mid + 1
75 lo = advance(end + 40, '\n') + 1
75 lo = advance(end + 40, '\n') + 1
76 else:
76 else:
77 # this translates to the bisect hi = mid
77 # this translates to the bisect hi = mid
78 hi = start
78 hi = start
79 end = advance(lo, '\0')
79 end = advance(lo, '\0')
80 found = m[lo:end]
80 found = m[lo:end]
81 if cmp(s, found) == 0:
81 if cmp(s, found) == 0:
82 # we know that after the null there are 40 bytes of sha1
82 # we know that after the null there are 40 bytes of sha1
83 end = advance(end + 40, '\n')
83 end = advance(end + 40, '\n')
84 return (lo, end+1)
84 return (lo, end+1)
85 else:
85 else:
86 return (lo, lo)
86 return (lo, lo)
87
87
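
Because manifest lines are sorted by filename, manifestsearch can bisect directly over the raw text, snapping each probe back to the previous newline and forward to the NUL to read the name under the midpoint. The same lookup expressed with Python's bisect module on a materialized line list, which manifestsearch deliberately avoids building (toy data):

    # Equivalent lookup via bisect on split lines; illustrative only.
    import bisect

    lines = ["a\x00" + "00" * 20 + "\n", "c\x00" + "11" * 20 + "\n"]
    names = [l.split("\x00")[0] for l in lines]
    i = bisect.bisect_left(names, "c")
    print(i, i < len(names) and names[i] == "c")   # -> 1 True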
88 # apply the changes collected during the bisect loop to our addlist
88 # apply the changes collected during the bisect loop to our addlist
89 # return a delta suitable for addrevision
89 # return a delta suitable for addrevision
90 def addlistdelta(addlist, x):
90 def addlistdelta(addlist, x):
91 # start from the bottom up
91 # start from the bottom up
92 # so changes to the offsets don't mess things up.
92 # so changes to the offsets don't mess things up.
93 i = len(x)
93 i = len(x)
94 while i > 0:
94 while i > 0:
95 i -= 1
95 i -= 1
96 start = x[i][0]
96 start = x[i][0]
97 end = x[i][1]
97 end = x[i][1]
98 if x[i][2]:
98 if x[i][2]:
99 addlist[start:end] = array.array('c', x[i][2])
99 addlist[start:end] = array.array('c', x[i][2])
100 else:
100 else:
101 del addlist[start:end]
101 del addlist[start:end]
102 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
102 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
103 for d in x ])
103 for d in x ])
104
104
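
addlistdelta emits each hunk as a 12-byte big-endian header of (start, end, replacement length) followed by the replacement bytes, which is the delta shape addrevision consumes. Decoding one such hunk:

    # Decode one "start, end, len(data)" hunk as packed by addlistdelta above.
    import struct

    hunk = struct.pack(">lll", 5, 9, 3) + b"abc"   # made-up hunk
    start, end, dlen = struct.unpack(">lll", hunk[:12])
    data = hunk[12:12 + dlen]
    print(start, end, data)                        # -> 5 9 b'abc'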
105 # if we're using the listcache, make sure it is valid and
105 # if we're using the listcache, make sure it is valid and
106 # parented by the same node we're diffing against
106 # parented by the same node we're diffing against
107 if not changed or not self.listcache or not p1 or \
107 if not changed or not self.listcache or not p1 or \
108 self.mapcache[0] != p1:
108 self.mapcache[0] != p1:
109 files = map.keys()
109 files = map.keys()
110 files.sort()
110 files.sort()
111
111
112 # if this is changed to support newlines in filenames,
112 # if this is changed to support newlines in filenames,
113 # be sure to check the templates/ dir again (especially *-raw.tmpl)
113 # be sure to check the templates/ dir again (especially *-raw.tmpl)
114 text = ["%s\000%s%s\n" %
114 text = ["%s\000%s%s\n" %
115 (f, hex(map[f]), flags[f] and "x" or '')
115 (f, hex(map[f]), flags[f] and "x" or '')
116 for f in files]
116 for f in files]
117 self.listcache = array.array('c', "".join(text))
117 self.listcache = array.array('c', "".join(text))
118 cachedelta = None
118 cachedelta = None
119 else:
119 else:
120 addlist = self.listcache
120 addlist = self.listcache
121
121
122 # combine the changed lists into one list for sorting
122 # combine the changed lists into one list for sorting
123 work = [[x, 0] for x in changed[0]]
123 work = [[x, 0] for x in changed[0]]
124 work[len(work):] = [[x, 1] for x in changed[1]]
124 work[len(work):] = [[x, 1] for x in changed[1]]
125 work.sort()
125 work.sort()
126
126
127 delta = []
127 delta = []
128 dstart = None
128 dstart = None
129 dend = None
129 dend = None
130 dline = [""]
130 dline = [""]
131 start = 0
131 start = 0
132 # zero copy representation of addlist as a buffer
132 # zero copy representation of addlist as a buffer
133 addbuf = buffer(addlist)
133 addbuf = buffer(addlist)
134
134
135 # start with a readonly loop that finds the offset of
135 # start with a readonly loop that finds the offset of
136 # each line and creates the deltas
136 # each line and creates the deltas
137 for w in work:
137 for w in work:
138 f = w[0]
138 f = w[0]
139 # bs will either be the index of the item or the insert point
139 # bs will either be the index of the item or the insert point
140 start, end = manifestsearch(addbuf, f, start)
140 start, end = manifestsearch(addbuf, f, start)
141 if w[1] == 0:
141 if w[1] == 0:
142 l = "%s\000%s%s\n" % (f, hex(map[f]),
142 l = "%s\000%s%s\n" % (f, hex(map[f]),
143 flags[f] and "x" or '')
143 flags[f] and "x" or '')
144 else:
144 else:
145 l = ""
145 l = ""
146 if start == end and w[1] == 1:
146 if start == end and w[1] == 1:
147 # item we want to delete was not found, error out
147 # item we want to delete was not found, error out
148 raise AssertionError(
148 raise AssertionError(
149 _("failed to remove %s from manifest\n") % f)
149 _("failed to remove %s from manifest\n") % f)
150 if dstart != None and dstart <= start and dend >= start:
150 if dstart != None and dstart <= start and dend >= start:
151 if dend < end:
151 if dend < end:
152 dend = end
152 dend = end
153 if l:
153 if l:
154 dline.append(l)
154 dline.append(l)
155 else:
155 else:
156 if dstart != None:
156 if dstart != None:
157 delta.append([dstart, dend, "".join(dline)])
157 delta.append([dstart, dend, "".join(dline)])
158 dstart = start
158 dstart = start
159 dend = end
159 dend = end
160 dline = [l]
160 dline = [l]
161
161
162 if dstart != None:
162 if dstart != None:
163 delta.append([dstart, dend, "".join(dline)])
163 delta.append([dstart, dend, "".join(dline)])
164 # apply the delta to the addlist, and get a delta for addrevision
164 # apply the delta to the addlist, and get a delta for addrevision
165 cachedelta = addlistdelta(addlist, delta)
165 cachedelta = addlistdelta(addlist, delta)
166
166
167 # the delta is only valid if we've been processing the tip revision
167 # the delta is only valid if we've been processing the tip revision
168 if self.mapcache[0] != self.tip():
168 if self.mapcache[0] != self.tip():
169 cachedelta = None
169 cachedelta = None
170 self.listcache = addlist
170 self.listcache = addlist
171
171
172 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
172 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
173 p2, cachedelta)
173 p2, cachedelta)
174 self.mapcache = (n, map, flags)
174 self.mapcache = (n, map, flags)
175
175
176 return n
176 return n
@@ -1,1241 +1,1241
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 demandload(globals(), "sha struct zlib")
17 demandload(globals(), "sha struct zlib")
18
18
19 # revlog version strings
19 # revlog version strings
20 REVLOGV0 = 0
20 REVLOGV0 = 0
21 REVLOGNG = 1
21 REVLOGNG = 1
22
22
23 # revlog flags
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOGNGINLINEDATA = (1 << 16)
25
25
26 def flagstr(flag):
26 def flagstr(flag):
27 if flag == "inline":
27 if flag == "inline":
28 return REVLOGNGINLINEDATA
28 return REVLOGNGINLINEDATA
29 raise RevlogError(_("unknown revlog flag %s") % flag)
29 raise RevlogError(_("unknown revlog flag %s") % flag)
30
30
31 def hash(text, p1, p2):
31 def hash(text, p1, p2):
32 """generate a hash from the given text and its parent hashes
32 """generate a hash from the given text and its parent hashes
33
33
34 This hash combines both the current file contents and its history
34 This hash combines both the current file contents and its history
35 in a manner that makes it easy to distinguish nodes with the same
35 in a manner that makes it easy to distinguish nodes with the same
36 content in the revision graph.
36 content in the revision graph.
37 """
37 """
38 l = [p1, p2]
38 l = [p1, p2]
39 l.sort()
39 l.sort()
40 s = sha.new(l[0])
40 s = sha.new(l[0])
41 s.update(l[1])
41 s.update(l[1])
42 s.update(text)
42 s.update(text)
43 return s.digest()
43 return s.digest()
44
44
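
Concretely, the node id is the SHA-1 of the two parent ids in sorted order followed by the text, so the same content reached through different histories gets a different node. A sketch using hashlib, the modern successor of the sha module imported above:

    # Recompute a node id the way hash() does (hashlib stands in for sha).
    import hashlib

    def node_hash(text, p1, p2):
        a, b = sorted([p1, p2])      # parent order must not affect the hash
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    nullid = b"\0" * 20
    print(node_hash(b"hello\n", nullid, nullid).hex())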
45 def compress(text):
45 def compress(text):
46 """ generate a possibly-compressed representation of text """
46 """ generate a possibly-compressed representation of text """
47 if not text: return ("", text)
47 if not text: return ("", text)
48 if len(text) < 44:
48 if len(text) < 44:
49 if text[0] == '\0': return ("", text)
49 if text[0] == '\0': return ("", text)
50 return ('u', text)
50 return ('u', text)
51 bin = zlib.compress(text)
51 bin = zlib.compress(text)
52 if len(bin) > len(text):
52 if len(bin) > len(text):
53 if text[0] == '\0': return ("", text)
53 if text[0] == '\0': return ("", text)
54 return ('u', text)
54 return ('u', text)
55 return ("", bin)
55 return ("", bin)
56
56
57 def decompress(bin):
57 def decompress(bin):
58 """ decompress the given input """
58 """ decompress the given input """
59 if not bin: return bin
59 if not bin: return bin
60 t = bin[0]
60 t = bin[0]
61 if t == '\0': return bin
61 if t == '\0': return bin
62 if t == 'x': return zlib.decompress(bin)
62 if t == 'x': return zlib.decompress(bin)
63 if t == 'u': return bin[1:]
63 if t == 'u': return bin[1:]
64 raise RevlogError(_("unknown compression type %r") % t)
64 raise RevlogError(_("unknown compression type %r") % t)
65
65
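
compress stores zlib output only when it is smaller, prefixes incompressible text with 'u', and leaves text beginning with NUL untagged, so decompress can dispatch purely on the first byte: '\0' raw, 'x' a zlib stream, 'u' a literal. A round-trip sketch of the same tagging scheme using Python 3 bytes:

    # Round-trip through the first-byte tagging scheme used above.
    import zlib

    def decompress(data):
        if not data: return data
        t = data[:1]
        if t == b"\0": return data
        if t == b"x": return zlib.decompress(data)
        if t == b"u": return data[1:]
        raise ValueError("unknown compression type %r" % t)

    print(decompress(b"u" + b"short text"))       # stored literally
    print(decompress(zlib.compress(b"a" * 100)))  # zlib streams start with 'x'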
66 indexformatv0 = ">4l20s20s20s"
66 indexformatv0 = ">4l20s20s20s"
67 v0shaoffset = 56
67 v0shaoffset = 56
68 # index ng:
68 # index ng:
69 # 6 bytes offset
69 # 6 bytes offset
70 # 2 bytes flags
70 # 2 bytes flags
71 # 4 bytes compressed length
71 # 4 bytes compressed length
72 # 4 bytes uncompressed length
72 # 4 bytes uncompressed length
73 # 4 bytes: base rev
73 # 4 bytes: base rev
74 # 4 bytes link rev
74 # 4 bytes link rev
75 # 4 bytes parent 1 rev
75 # 4 bytes parent 1 rev
76 # 4 bytes parent 2 rev
76 # 4 bytes parent 2 rev
77 # 32 bytes: nodeid
77 # 32 bytes: nodeid
78 indexformatng = ">Qiiiiii20s12x"
78 indexformatng = ">Qiiiiii20s12x"
79 ngshaoffset = 32
79 ngshaoffset = 32
80 versionformat = ">i"
80 versionformat = ">i"
81
81
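
An NG index record is therefore 64 bytes: one 64-bit quad holding the 48-bit offset and 16-bit flags, six 32-bit ints, the 20-byte node, and 12 bytes of padding. Packing and unpacking one record with the format string above (field values made up):

    # Pack/unpack one revlogng index entry.
    import struct

    indexformatng = ">Qiiiiii20s12x"
    node = b"\xab" * 20
    e = struct.pack(indexformatng, 0, 30, 75, 0, 0, -1, -1, node)
    print(struct.calcsize(indexformatng))               # -> 64
    print(struct.unpack(indexformatng, e)[-1] == node)  # -> True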
82 class lazyparser(object):
82 class lazyparser(object):
83 """
83 """
84 this class avoids the need to parse the entirety of large indices
84 this class avoids the need to parse the entirety of large indices
85 """
85 """
86 def __init__(self, dataf, size, indexformat, shaoffset):
86 def __init__(self, dataf, size, indexformat, shaoffset):
87 self.dataf = dataf
87 self.dataf = dataf
88 self.format = indexformat
88 self.format = indexformat
89 self.s = struct.calcsize(indexformat)
89 self.s = struct.calcsize(indexformat)
90 self.indexformat = indexformat
90 self.indexformat = indexformat
91 self.datasize = size
91 self.datasize = size
92 self.l = size/self.s
92 self.l = size/self.s
93 self.index = [None] * self.l
93 self.index = [None] * self.l
94 self.map = {nullid: -1}
94 self.map = {nullid: -1}
95 self.allmap = 0
95 self.allmap = 0
96 self.all = 0
96 self.all = 0
97 self.mapfind_count = 0
97 self.mapfind_count = 0
98 self.shaoffset = shaoffset
98 self.shaoffset = shaoffset
99
99
100 def loadmap(self):
100 def loadmap(self):
101 """
101 """
102 during a commit, we need to make sure the rev being added is
102 during a commit, we need to make sure the rev being added is
103 not a duplicate. This requires loading the entire index,
103 not a duplicate. This requires loading the entire index,
104 which is fairly slow. loadmap can load up just the node map,
104 which is fairly slow. loadmap can load up just the node map,
105 which takes much less time.
105 which takes much less time.
106 """
106 """
107 if self.allmap: return
107 if self.allmap: return
108 start = 0
108 start = 0
109 end = self.datasize
109 end = self.datasize
110 self.allmap = 1
110 self.allmap = 1
111 cur = 0
111 cur = 0
112 count = 0
112 count = 0
113 blocksize = self.s * 256
113 blocksize = self.s * 256
114 self.dataf.seek(0)
114 self.dataf.seek(0)
115 while cur < end:
115 while cur < end:
116 data = self.dataf.read(blocksize)
116 data = self.dataf.read(blocksize)
117 off = 0
117 off = 0
118 for x in xrange(256):
118 for x in xrange(256):
119 n = data[off + self.shaoffset:off + self.shaoffset + 20]
119 n = data[off + self.shaoffset:off + self.shaoffset + 20]
120 self.map[n] = count
120 self.map[n] = count
121 count += 1
121 count += 1
122 if count >= self.l:
122 if count >= self.l:
123 break
123 break
124 off += self.s
124 off += self.s
125 cur += blocksize
125 cur += blocksize
126
126
127 def loadblock(self, blockstart, blocksize, data=None):
127 def loadblock(self, blockstart, blocksize, data=None):
128 if self.all: return
128 if self.all: return
129 if data is None:
129 if data is None:
130 self.dataf.seek(blockstart)
130 self.dataf.seek(blockstart)
131 data = self.dataf.read(blocksize)
131 data = self.dataf.read(blocksize)
132 lend = len(data) / self.s
132 lend = len(data) / self.s
133 i = blockstart / self.s
133 i = blockstart / self.s
134 off = 0
134 off = 0
135 for x in xrange(lend):
135 for x in xrange(lend):
136 if self.index[i + x] == None:
136 if self.index[i + x] == None:
137 b = data[off : off + self.s]
137 b = data[off : off + self.s]
138 self.index[i + x] = b
138 self.index[i + x] = b
139 n = b[self.shaoffset:self.shaoffset + 20]
139 n = b[self.shaoffset:self.shaoffset + 20]
140 self.map[n] = i + x
140 self.map[n] = i + x
141 off += self.s
141 off += self.s
142
142
143 def findnode(self, node):
143 def findnode(self, node):
144 """search backwards through the index file for a specific node"""
144 """search backwards through the index file for a specific node"""
145 if self.allmap: return None
145 if self.allmap: return None
146
146
147 # hg log will cause many many searches for the manifest
147 # hg log will cause many many searches for the manifest
148 # nodes. After we get called a few times, just load the whole
148 # nodes. After we get called a few times, just load the whole
149 # thing.
149 # thing.
150 if self.mapfind_count > 8:
150 if self.mapfind_count > 8:
151 self.loadmap()
151 self.loadmap()
152 if node in self.map:
152 if node in self.map:
153 return node
153 return node
154 return None
154 return None
155 self.mapfind_count += 1
155 self.mapfind_count += 1
156 last = self.l - 1
156 last = self.l - 1
157 while self.index[last] != None:
157 while self.index[last] != None:
158 if last == 0:
158 if last == 0:
159 self.all = 1
159 self.all = 1
160 self.allmap = 1
160 self.allmap = 1
161 return None
161 return None
162 last -= 1
162 last -= 1
163 end = (last + 1) * self.s
163 end = (last + 1) * self.s
164 blocksize = self.s * 256
164 blocksize = self.s * 256
165 while end >= 0:
165 while end >= 0:
166 start = max(end - blocksize, 0)
166 start = max(end - blocksize, 0)
167 self.dataf.seek(start)
167 self.dataf.seek(start)
168 data = self.dataf.read(end - start)
168 data = self.dataf.read(end - start)
169 findend = end - start
169 findend = end - start
170 while True:
170 while True:
171 # we're searching backwards, so we have to make sure
171 # we're searching backwards, so we have to make sure
172 # we don't find a changeset where this node is a parent
172 # we don't find a changeset where this node is a parent
173 off = data.rfind(node, 0, findend)
173 off = data.rfind(node, 0, findend)
174 findend = off
174 findend = off
175 if off >= 0:
175 if off >= 0:
176 i = off / self.s
176 i = off / self.s
177 off = i * self.s
177 off = i * self.s
178 n = data[off + self.shaoffset:off + self.shaoffset + 20]
178 n = data[off + self.shaoffset:off + self.shaoffset + 20]
179 if n == node:
179 if n == node:
180 self.map[n] = i + start / self.s
180 self.map[n] = i + start / self.s
181 return node
181 return node
182 else:
182 else:
183 break
183 break
184 end -= blocksize
184 end -= blocksize
185 return None
185 return None
186
186
187 def loadindex(self, i=None, end=None):
187 def loadindex(self, i=None, end=None):
188 if self.all: return
188 if self.all: return
189 all = False
189 all = False
190 if i == None:
190 if i == None:
191 blockstart = 0
191 blockstart = 0
192 blocksize = (512 / self.s) * self.s
192 blocksize = (512 / self.s) * self.s
193 end = self.datasize
193 end = self.datasize
194 all = True
194 all = True
195 else:
195 else:
196 if end:
196 if end:
197 blockstart = i * self.s
197 blockstart = i * self.s
198 end = end * self.s
198 end = end * self.s
199 blocksize = end - blockstart
199 blocksize = end - blockstart
200 else:
200 else:
201 blockstart = (i & ~(32)) * self.s
201 blockstart = (i & ~(32)) * self.s
202 blocksize = self.s * 64
202 blocksize = self.s * 64
203 end = blockstart + blocksize
203 end = blockstart + blocksize
204 while blockstart < end:
204 while blockstart < end:
205 self.loadblock(blockstart, blocksize)
205 self.loadblock(blockstart, blocksize)
206 blockstart += blocksize
206 blockstart += blocksize
207 if all: self.all = True
207 if all: self.all = True
208
208
209 class lazyindex(object):
209 class lazyindex(object):
210 """a lazy version of the index array"""
210 """a lazy version of the index array"""
211 def __init__(self, parser):
211 def __init__(self, parser):
212 self.p = parser
212 self.p = parser
213 def __len__(self):
213 def __len__(self):
214 return len(self.p.index)
214 return len(self.p.index)
215 def load(self, pos):
215 def load(self, pos):
216 if pos < 0:
216 if pos < 0:
217 pos += len(self.p.index)
217 pos += len(self.p.index)
218 self.p.loadindex(pos)
218 self.p.loadindex(pos)
219 return self.p.index[pos]
219 return self.p.index[pos]
220 def __getitem__(self, pos):
220 def __getitem__(self, pos):
221 ret = self.p.index[pos] or self.load(pos)
221 ret = self.p.index[pos] or self.load(pos)
222 if isinstance(ret, str):
222 if isinstance(ret, str):
223 ret = struct.unpack(self.p.indexformat, ret)
223 ret = struct.unpack(self.p.indexformat, ret)
224 return ret
224 return ret
225 def __setitem__(self, pos, item):
225 def __setitem__(self, pos, item):
226 self.p.index[pos] = item
226 self.p.index[pos] = item
227 def __delitem__(self, pos):
227 def __delitem__(self, pos):
228 del self.p.index[pos]
228 del self.p.index[pos]
229 def append(self, e):
229 def append(self, e):
230 self.p.index.append(e)
230 self.p.index.append(e)
231
231
232 class lazymap(object):
232 class lazymap(object):
233 """a lazy version of the node map"""
233 """a lazy version of the node map"""
234 def __init__(self, parser):
234 def __init__(self, parser):
235 self.p = parser
235 self.p = parser
236 def load(self, key):
236 def load(self, key):
237 n = self.p.findnode(key)
237 n = self.p.findnode(key)
238 if n == None:
238 if n == None:
239 raise KeyError(key)
239 raise KeyError(key)
240 def __contains__(self, key):
240 def __contains__(self, key):
241 if key in self.p.map:
241 if key in self.p.map:
242 return True
242 return True
243 self.p.loadmap()
243 self.p.loadmap()
244 return key in self.p.map
244 return key in self.p.map
245 def __iter__(self):
245 def __iter__(self):
246 yield nullid
246 yield nullid
247 for i in xrange(self.p.l):
247 for i in xrange(self.p.l):
248 ret = self.p.index[i]
248 ret = self.p.index[i]
249 if not ret:
249 if not ret:
250 self.p.loadindex(i)
250 self.p.loadindex(i)
251 ret = self.p.index[i]
251 ret = self.p.index[i]
252 if isinstance(ret, str):
252 if isinstance(ret, str):
253 ret = struct.unpack(self.p.indexformat, ret)
253 ret = struct.unpack(self.p.indexformat, ret)
254 yield ret[-1]
254 yield ret[-1]
255 def __getitem__(self, key):
255 def __getitem__(self, key):
256 try:
256 try:
257 return self.p.map[key]
257 return self.p.map[key]
258 except KeyError:
258 except KeyError:
259 try:
259 try:
260 self.load(key)
260 self.load(key)
261 return self.p.map[key]
261 return self.p.map[key]
262 except KeyError:
262 except KeyError:
263 raise KeyError("node " + hex(key))
263 raise KeyError("node " + hex(key))
264 def __setitem__(self, key, val):
264 def __setitem__(self, key, val):
265 self.p.map[key] = val
265 self.p.map[key] = val
266 def __delitem__(self, key):
266 def __delitem__(self, key):
267 del self.p.map[key]
267 del self.p.map[key]
268
268
269 class RevlogError(Exception): pass
269 class RevlogError(Exception): pass
270
270
271 class revlog(object):
271 class revlog(object):
272 """
272 """
273 the underlying revision storage object
273 the underlying revision storage object
274
274
275 A revlog consists of two parts, an index and the revision data.
275 A revlog consists of two parts, an index and the revision data.
276
276
277 The index is a file with a fixed record size containing
277 The index is a file with a fixed record size containing
278 information on each revision, including its nodeid (hash), the
278 information on each revision, including its nodeid (hash), the
279 nodeids of its parents, the position and offset of its data within
279 nodeids of its parents, the position and offset of its data within
280 the data file, and the revision it's based on. Finally, each entry
280 the data file, and the revision it's based on. Finally, each entry
281 contains a linkrev entry that can serve as a pointer to external
281 contains a linkrev entry that can serve as a pointer to external
282 data.
282 data.
283
283
284 The revision data itself is a linear collection of data chunks.
284 The revision data itself is a linear collection of data chunks.
285 Each chunk represents a revision and is usually represented as a
285 Each chunk represents a revision and is usually represented as a
286 delta against the previous chunk. To bound lookup time, runs of
286 delta against the previous chunk. To bound lookup time, runs of
287 deltas are limited to about 2 times the length of the original
287 deltas are limited to about 2 times the length of the original
288 version data. This makes retrieval of a version proportional to
288 version data. This makes retrieval of a version proportional to
289 its size, or O(1) relative to the number of revisions.
289 its size, or O(1) relative to the number of revisions.
290
290
291 Both pieces of the revlog are written to in an append-only
291 Both pieces of the revlog are written to in an append-only
292 fashion, which means we never need to rewrite a file to insert or
292 fashion, which means we never need to rewrite a file to insert or
293 remove data, and can use some simple techniques to avoid the need
293 remove data, and can use some simple techniques to avoid the need
294 for locking while reading.
294 for locking while reading.
295 """
295 """
296 def __init__(self, opener, indexfile, datafile, defversion=0):
296 def __init__(self, opener, indexfile, datafile, defversion=REVLOGV0):
297 """
297 """
298 create a revlog object
298 create a revlog object
299
299
300 opener is a function that abstracts the file opening operation
300 opener is a function that abstracts the file opening operation
301 and can be used to implement COW semantics or the like.
301 and can be used to implement COW semantics or the like.
302 """
302 """
303 self.indexfile = indexfile
303 self.indexfile = indexfile
304 self.datafile = datafile
304 self.datafile = datafile
305 self.opener = opener
305 self.opener = opener
306
306
307 self.indexstat = None
307 self.indexstat = None
308 self.cache = None
308 self.cache = None
309 self.chunkcache = None
309 self.chunkcache = None
310 self.defversion = defversion
310 self.defversion = defversion
311 self.load()
311 self.load()
312
312
313 def load(self):
313 def load(self):
314 v = self.defversion
314 v = self.defversion
315 try:
315 try:
316 f = self.opener(self.indexfile)
316 f = self.opener(self.indexfile)
317 i = f.read(4)
317 i = f.read(4)
318 f.seek(0)
318 f.seek(0)
319 except IOError, inst:
319 except IOError, inst:
320 if inst.errno != errno.ENOENT:
320 if inst.errno != errno.ENOENT:
321 raise
321 raise
322 i = ""
322 i = ""
323 else:
323 else:
324 try:
324 try:
325 st = os.fstat(f.fileno())
325 st = os.fstat(f.fileno())
326 except AttributeError, inst:
326 except AttributeError, inst:
327 st = None
327 st = None
328 else:
328 else:
329 oldst = self.indexstat
329 oldst = self.indexstat
330 if (oldst and st.st_dev == oldst.st_dev
330 if (oldst and st.st_dev == oldst.st_dev
331 and st.st_ino == oldst.st_ino
331 and st.st_ino == oldst.st_ino
332 and st.st_mtime == oldst.st_mtime
332 and st.st_mtime == oldst.st_mtime
333 and st.st_ctime == oldst.st_ctime):
333 and st.st_ctime == oldst.st_ctime):
334 return
334 return
335 self.indexstat = st
335 self.indexstat = st
336 if len(i) > 0:
336 if len(i) > 0:
337 v = struct.unpack(versionformat, i)[0]
337 v = struct.unpack(versionformat, i)[0]
338 flags = v & ~0xFFFF
338 flags = v & ~0xFFFF
339 fmt = v & 0xFFFF
339 fmt = v & 0xFFFF
340 if fmt == 0:
340 if fmt == REVLOGV0:
341 if flags:
341 if flags:
342 raise RevlogError(_("index %s invalid flags %x for format v0") %
342 raise RevlogError(_("index %s invalid flags %x for format v0") %
343 (self.indexfile, flags))
343 (self.indexfile, flags))
344 elif fmt == REVLOGNG:
344 elif fmt == REVLOGNG:
345 if flags & ~REVLOGNGINLINEDATA:
345 if flags & ~REVLOGNGINLINEDATA:
346 raise RevlogError(_("index %s invalid flags %x for revlogng") %
346 raise RevlogError(_("index %s invalid flags %x for revlogng") %
347 (self.indexfile, flags))
347 (self.indexfile, flags))
348 else:
348 else:
349 raise RevlogError(_("index %s invalid format %d") %
349 raise RevlogError(_("index %s invalid format %d") %
350 (self.indexfile, fmt))
350 (self.indexfile, fmt))
351 self.version = v
351 self.version = v
352 if v == 0:
352 if v == REVLOGV0:
353 self.indexformat = indexformatv0
353 self.indexformat = indexformatv0
354 shaoffset = v0shaoffset
354 shaoffset = v0shaoffset
355 else:
355 else:
356 self.indexformat = indexformatng
356 self.indexformat = indexformatng
357 shaoffset = ngshaoffset
357 shaoffset = ngshaoffset
358
358
359 if i:
359 if i:
360 if not self.inlinedata() and st and st.st_size > 10000:
360 if not self.inlinedata() and st and st.st_size > 10000:
361 # big index, let's parse it on demand
361 # big index, let's parse it on demand
362 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
362 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
363 self.index = lazyindex(parser)
363 self.index = lazyindex(parser)
364 self.nodemap = lazymap(parser)
364 self.nodemap = lazymap(parser)
365 else:
365 else:
366 i = f.read()
366 i = f.read()
367 self.parseindex(i)
367 self.parseindex(i)
368 if self.inlinedata():
368 if self.inlinedata():
369 # we've already got the entire data file read in, save it
369 # we've already got the entire data file read in, save it
370 # in the chunk data
370 # in the chunk data
371 self.chunkcache = (0, i)
371 self.chunkcache = (0, i)
372 if self.version != 0:
372 if self.version != REVLOGV0:
373 e = list(self.index[0])
373 e = list(self.index[0])
374 type = self.ngtype(e[0])
374 type = self.ngtype(e[0])
375 e[0] = self.offset_type(0, type)
375 e[0] = self.offset_type(0, type)
376 self.index[0] = e
376 self.index[0] = e
377 else:
377 else:
378 self.nodemap = { nullid: -1}
378 self.nodemap = { nullid: -1}
379 self.index = []
379 self.index = []
380
380
381
381
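
The first four bytes of the index double as the version word of revision zero: the low 16 bits name the format (REVLOGV0 or REVLOGNG) and the high bits carry flags such as inline data. A sketch of the split load() performs:

    # Split a revlog version word into format and flags, as in load() above.
    import struct

    REVLOGNG = 1
    REVLOGNGINLINEDATA = 1 << 16

    v = struct.unpack(">i", struct.pack(">i", REVLOGNG | REVLOGNGINLINEDATA))[0]
    flags = v & ~0xFFFF
    fmt = v & 0xFFFF
    print(fmt == REVLOGNG, bool(flags & REVLOGNGINLINEDATA))  # -> True True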
382 def parseindex(self, data):
382 def parseindex(self, data):
383 s = struct.calcsize(self.indexformat)
383 s = struct.calcsize(self.indexformat)
384 l = len(data)
384 l = len(data)
385 self.index = []
385 self.index = []
386 self.nodemap = {nullid: -1}
386 self.nodemap = {nullid: -1}
387 inline = self.inlinedata()
387 inline = self.inlinedata()
388 off = 0
388 off = 0
389 n = 0
389 n = 0
390 while off < l:
390 while off < l:
391 e = struct.unpack(self.indexformat, data[off:off + s])
391 e = struct.unpack(self.indexformat, data[off:off + s])
392 self.index.append(e)
392 self.index.append(e)
393 self.nodemap[e[-1]] = n
393 self.nodemap[e[-1]] = n
394 n += 1
394 n += 1
395 off += s
395 off += s
396 if inline:
396 if inline:
397 off += e[1]
397 off += e[1]
398
398
399 def ngoffset(self, q):
399 def ngoffset(self, q):
400 if q & 0xFFFF:
400 if q & 0xFFFF:
401 raise RevlogError(_('%s: incompatible revision flag %x') %
401 raise RevlogError(_('%s: incompatible revision flag %x') %
402 (self.indexfile, q))
402 (self.indexfile, q))
403 return long(q >> 16)
403 return long(q >> 16)
404
404
405 def ngtype(self, q):
405 def ngtype(self, q):
406 return int(q & 0xFFFF)
406 return int(q & 0xFFFF)
407
407
408 def offset_type(self, offset, type):
408 def offset_type(self, offset, type):
409 return long(long(offset) << 16 | type)
409 return long(long(offset) << 16 | type)
410
410
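
These three helpers pack a data-file offset and a 16-bit type into the single integer stored in an NG index entry's first field. The round trip in isolation:

    # Pack and unpack the (offset, type) pair from an NG index entry.
    def offset_type(offset, type):
        return (offset << 16) | type

    q = offset_type(4096, 0)
    print(q >> 16, q & 0xFFFF)   # -> 4096 0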
411 def loadindex(self, start, end):
411 def loadindex(self, start, end):
412 """load a block of indexes all at once from the lazy parser"""
412 """load a block of indexes all at once from the lazy parser"""
413 if isinstance(self.index, lazyindex):
413 if isinstance(self.index, lazyindex):
414 self.index.p.loadindex(start, end)
414 self.index.p.loadindex(start, end)
415
415
416 def loadindexmap(self):
416 def loadindexmap(self):
417 """loads both the map and the index from the lazy parser"""
417 """loads both the map and the index from the lazy parser"""
418 if isinstance(self.index, lazyindex):
418 if isinstance(self.index, lazyindex):
419 p = self.index.p
419 p = self.index.p
420 p.loadindex()
420 p.loadindex()
421 self.nodemap = p.map
421 self.nodemap = p.map
422
422
423 def loadmap(self):
423 def loadmap(self):
424 """loads the map from the lazy parser"""
424 """loads the map from the lazy parser"""
425 if isinstance(self.nodemap, lazymap):
425 if isinstance(self.nodemap, lazymap):
426 self.nodemap.p.loadmap()
426 self.nodemap.p.loadmap()
427 self.nodemap = self.nodemap.p.map
427 self.nodemap = self.nodemap.p.map
428
428
429 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
429 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
430 def tip(self): return self.node(len(self.index) - 1)
430 def tip(self): return self.node(len(self.index) - 1)
431 def count(self): return len(self.index)
431 def count(self): return len(self.index)
432 def node(self, rev):
432 def node(self, rev):
433 return (rev < 0) and nullid or self.index[rev][-1]
433 return (rev < 0) and nullid or self.index[rev][-1]
434 def rev(self, node):
434 def rev(self, node):
435 try:
435 try:
436 return self.nodemap[node]
436 return self.nodemap[node]
437 except KeyError:
437 except KeyError:
438 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
438 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
439 def linkrev(self, node): return self.index[self.rev(node)][-4]
439 def linkrev(self, node): return self.index[self.rev(node)][-4]
440 def parents(self, node):
440 def parents(self, node):
441 if node == nullid: return (nullid, nullid)
441 if node == nullid: return (nullid, nullid)
442 r = self.rev(node)
442 r = self.rev(node)
443 d = self.index[r][-3:-1]
443 d = self.index[r][-3:-1]
444 if self.version == 0:
444 if self.version == REVLOGV0:
445 return d
445 return d
446 return [ self.node(x) for x in d ]
446 return [ self.node(x) for x in d ]
447 def start(self, rev):
447 def start(self, rev):
448 if rev < 0:
448 if rev < 0:
449 return -1
449 return -1
450 if self.version != 0:
450 if self.version != REVLOGV0:
451 return self.ngoffset(self.index[rev][0])
451 return self.ngoffset(self.index[rev][0])
452 return self.index[rev][0]
452 return self.index[rev][0]
453
453
454 def end(self, rev): return self.start(rev) + self.length(rev)
454 def end(self, rev): return self.start(rev) + self.length(rev)
455
455
456 def size(self, rev):
456 def size(self, rev):
457 """return the length of the uncompressed text for a given revision"""
457 """return the length of the uncompressed text for a given revision"""
458 l = -1
458 l = -1
459 if self.version != 0:
459 if self.version != REVLOGV0:
460 l = self.index[rev][2]
460 l = self.index[rev][2]
461 if l >= 0:
461 if l >= 0:
462 return l
462 return l
463
463
464 t = self.revision(self.node(rev))
464 t = self.revision(self.node(rev))
465 return len(t)
465 return len(t)
466
466
467 # alternate implementation. The advantage of this code is that it
467 # alternate implementation. The advantage of this code is that it
468 # will be faster for a single revision, but the results are not
468 # will be faster for a single revision, but the results are not
469 # cached, so finding the size of every revision will be slower.
469 # cached, so finding the size of every revision will be slower.
470 """
470 """
471 if self.cache and self.cache[1] == rev:
471 if self.cache and self.cache[1] == rev:
472 return len(self.cache[2])
472 return len(self.cache[2])
473
473
474 base = self.base(rev)
474 base = self.base(rev)
475 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
475 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
476 base = self.cache[1]
476 base = self.cache[1]
477 text = self.cache[2]
477 text = self.cache[2]
478 else:
478 else:
479 text = self.revision(self.node(base))
479 text = self.revision(self.node(base))
480
480
481 l = len(text)
481 l = len(text)
482 for x in xrange(base + 1, rev + 1):
482 for x in xrange(base + 1, rev + 1):
483 l = mdiff.patchedsize(l, self.chunk(x))
483 l = mdiff.patchedsize(l, self.chunk(x))
484 return l
484 return l
485 """
485 """
486
486
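
Both variants of size() lean on the delta-chain idea used throughout the revlog: take the full text at the chain's base revision and apply each successive chunk up to the target. A toy walk, with plain callables standing in for mdiff's binary patches:

    # Toy delta-chain reconstruction (callables stand in for mdiff patches).
    base_text = b"one\n"
    patches = [lambda t: t + b"two\n", lambda t: t + b"three\n"]  # revs 1, 2

    def revision(rev):
        text = base_text
        for p in patches[:rev]:
            text = p(text)
        return text

    print(revision(2))   # -> b'one\ntwo\nthree\n'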
487 def length(self, rev):
487 def length(self, rev):
488 if rev < 0:
488 if rev < 0:
489 return 0
489 return 0
490 else:
490 else:
491 return self.index[rev][1]
491 return self.index[rev][1]
492 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
492 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
493
493
494 def reachable(self, rev, stop=None):
494 def reachable(self, rev, stop=None):
495 reachable = {}
495 reachable = {}
496 visit = [rev]
496 visit = [rev]
497 reachable[rev] = 1
497 reachable[rev] = 1
498 if stop:
498 if stop:
499 stopn = self.rev(stop)
499 stopn = self.rev(stop)
500 else:
500 else:
501 stopn = 0
501 stopn = 0
502 while visit:
502 while visit:
503 n = visit.pop(0)
503 n = visit.pop(0)
504 if n == stop:
504 if n == stop:
505 continue
505 continue
506 if n == nullid:
506 if n == nullid:
507 continue
507 continue
508 for p in self.parents(n):
508 for p in self.parents(n):
509 if self.rev(p) < stopn:
509 if self.rev(p) < stopn:
510 continue
510 continue
511 if p not in reachable:
511 if p not in reachable:
512 reachable[p] = 1
512 reachable[p] = 1
513 visit.append(p)
513 visit.append(p)
514 return reachable
514 return reachable
515
515
516 def nodesbetween(self, roots=None, heads=None):
516 def nodesbetween(self, roots=None, heads=None):
517 """Return a tuple containing three elements. Elements 1 and 2 contain
517 """Return a tuple containing three elements. Elements 1 and 2 contain
518 a final list of bases and heads after all the unreachable ones have been
518 a final list of bases and heads after all the unreachable ones have been
519 pruned. Element 0 contains a topologically sorted list of all
519 pruned. Element 0 contains a topologically sorted list of all
520
520
521 nodes that satisfy these constraints:
521 nodes that satisfy these constraints:
522 1. All nodes must be descended from a node in roots (the nodes on
522 1. All nodes must be descended from a node in roots (the nodes on
523 roots are considered descended from themselves).
523 roots are considered descended from themselves).
524 2. All nodes must also be ancestors of a node in heads (the nodes in
524 2. All nodes must also be ancestors of a node in heads (the nodes in
525 heads are considered to be their own ancestors).
525 heads are considered to be their own ancestors).
526
526
527 If roots is unspecified, nullid is assumed as the only root.
527 If roots is unspecified, nullid is assumed as the only root.
528 If heads is unspecified, it is taken to be the output of the
528 If heads is unspecified, it is taken to be the output of the
529 heads method (i.e. a list of all nodes in the repository that
529 heads method (i.e. a list of all nodes in the repository that
530 have no children)."""
530 have no children)."""
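
In other words, nodesbetween computes the band of the graph bounded below by roots and above by heads. A toy version of that intersection on a dict-based DAG (graph and names are made up; the real method works on revision numbers for efficiency):

    # Toy roots/heads band on a small DAG.
    parents = {"a": [], "b": ["a"], "c": ["b"], "d": ["a"]}

    def band(roots, heads):
        anc, stack = set(), list(heads)
        while stack:                          # mark ancestors of heads
            n = stack.pop()
            if n not in anc:
                anc.add(n)
                stack.extend(parents[n])
        desc = set(roots)                     # mark descendents of roots
        for n in sorted(parents):             # alphabetical == topological here
            if any(p in desc for p in parents[n]):
                desc.add(n)
        return [n for n in sorted(parents) if n in anc and n in desc]

    print(band(["a"], ["c"]))   # -> ['a', 'b', 'c']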
531 nonodes = ([], [], [])
531 nonodes = ([], [], [])
532 if roots is not None:
532 if roots is not None:
533 roots = list(roots)
533 roots = list(roots)
534 if not roots:
534 if not roots:
535 return nonodes
535 return nonodes
536 lowestrev = min([self.rev(n) for n in roots])
536 lowestrev = min([self.rev(n) for n in roots])
537 else:
537 else:
538 roots = [nullid] # Everybody's a descendent of nullid
538 roots = [nullid] # Everybody's a descendent of nullid
539 lowestrev = -1
539 lowestrev = -1
540 if (lowestrev == -1) and (heads is None):
540 if (lowestrev == -1) and (heads is None):
541 # We want _all_ the nodes!
541 # We want _all_ the nodes!
542 return ([self.node(r) for r in xrange(0, self.count())],
542 return ([self.node(r) for r in xrange(0, self.count())],
543 [nullid], list(self.heads()))
543 [nullid], list(self.heads()))
544 if heads is None:
544 if heads is None:
545 # All nodes are ancestors, so the latest ancestor is the last
545 # All nodes are ancestors, so the latest ancestor is the last
546 # node.
546 # node.
547 highestrev = self.count() - 1
547 highestrev = self.count() - 1
548 # Set ancestors to None to signal that every node is an ancestor.
548 # Set ancestors to None to signal that every node is an ancestor.
549 ancestors = None
549 ancestors = None
550 # Set heads to an empty dictionary for later discovery of heads
550 # Set heads to an empty dictionary for later discovery of heads
551 heads = {}
551 heads = {}
552 else:
552 else:
553 heads = list(heads)
553 heads = list(heads)
554 if not heads:
554 if not heads:
555 return nonodes
555 return nonodes
556 ancestors = {}
556 ancestors = {}
557 # Start at the top and keep marking parents until we're done.
557 # Start at the top and keep marking parents until we're done.
558 nodestotag = heads[:]
558 nodestotag = heads[:]
559 # Turn heads into a dictionary so we can remove 'fake' heads.
559 # Turn heads into a dictionary so we can remove 'fake' heads.
560 # Also, later we will be using it to filter out the heads we can't
560 # Also, later we will be using it to filter out the heads we can't
561 # find from roots.
561 # find from roots.
562 heads = dict.fromkeys(heads, 0)
562 heads = dict.fromkeys(heads, 0)
563 # Remember where the top was so we can use it as a limit later.
563 # Remember where the top was so we can use it as a limit later.
564 highestrev = max([self.rev(n) for n in nodestotag])
564 highestrev = max([self.rev(n) for n in nodestotag])
565 while nodestotag:
565 while nodestotag:
566 # grab a node to tag
566 # grab a node to tag
567 n = nodestotag.pop()
567 n = nodestotag.pop()
568 # Never tag nullid
568 # Never tag nullid
569 if n == nullid:
569 if n == nullid:
570 continue
570 continue
571 # A node's revision number represents its place in a
571 # A node's revision number represents its place in a
572 # topologically sorted list of nodes.
572 # topologically sorted list of nodes.
573 r = self.rev(n)
573 r = self.rev(n)
574 if r >= lowestrev:
574 if r >= lowestrev:
575 if n not in ancestors:
575 if n not in ancestors:
576 # If we are possibly a descendent of one of the roots
576 # If we are possibly a descendent of one of the roots
577 # and we haven't already been marked as an ancestor
577 # and we haven't already been marked as an ancestor
578 ancestors[n] = 1 # Mark as ancestor
578 ancestors[n] = 1 # Mark as ancestor
579 # Add non-nullid parents to list of nodes to tag.
579 # Add non-nullid parents to list of nodes to tag.
580 nodestotag.extend([p for p in self.parents(n) if
580 nodestotag.extend([p for p in self.parents(n) if
581 p != nullid])
581 p != nullid])
582 elif n in heads: # We've seen it before, is it a fake head?
582 elif n in heads: # We've seen it before, is it a fake head?
583 # So it is, real heads should not be the ancestors of
583 # So it is, real heads should not be the ancestors of
584 # any other heads.
584 # any other heads.
585 heads.pop(n)
585 heads.pop(n)
586 if not ancestors:
586 if not ancestors:
587 return nonodes
587 return nonodes
588 # Now that we have our set of ancestors, we want to remove any
588 # Now that we have our set of ancestors, we want to remove any
589 # roots that are not ancestors.
589 # roots that are not ancestors.
590
590
591 # If one of the roots was nullid, everything is included anyway.
591 # If one of the roots was nullid, everything is included anyway.
592 if lowestrev > -1:
592 if lowestrev > -1:
593 # But, since we weren't, let's recompute the lowest rev to not
593 # But, since we weren't, let's recompute the lowest rev to not
594 # include roots that aren't ancestors.
594 # include roots that aren't ancestors.
595
595
596 # Filter out roots that aren't ancestors of heads
596 # Filter out roots that aren't ancestors of heads
597 roots = [n for n in roots if n in ancestors]
597 roots = [n for n in roots if n in ancestors]
598 # Recompute the lowest revision
598 # Recompute the lowest revision
599 if roots:
599 if roots:
600 lowestrev = min([self.rev(n) for n in roots])
600 lowestrev = min([self.rev(n) for n in roots])
601 else:
601 else:
602 # No more roots? Return empty list
602 # No more roots? Return empty list
603 return nonodes
603 return nonodes
604 else:
604 else:
605 # We are descending from nullid, and don't need to care about
605 # We are descending from nullid, and don't need to care about
606 # any other roots.
606 # any other roots.
607 lowestrev = -1
607 lowestrev = -1
608 roots = [nullid]
608 roots = [nullid]
609 # Transform our roots list into a 'set' (i.e. a dictionary where the
609 # Transform our roots list into a 'set' (i.e. a dictionary where the
610 # values don't matter.
610 # values don't matter.
611 descendents = dict.fromkeys(roots, 1)
611 descendents = dict.fromkeys(roots, 1)
612 # Also, keep the original roots so we can filter out roots that aren't
612 # Also, keep the original roots so we can filter out roots that aren't
613 # 'real' roots (i.e. are descended from other roots).
613 # 'real' roots (i.e. are descended from other roots).
614 roots = descendents.copy()
614 roots = descendents.copy()
615 # Our topologically sorted list of output nodes.
615 # Our topologically sorted list of output nodes.
616 orderedout = []
616 orderedout = []
617 # Don't start at nullid since we don't want nullid in our output list,
617 # Don't start at nullid since we don't want nullid in our output list,
618 # and if nullid shows up in descedents, empty parents will look like
618 # and if nullid shows up in descedents, empty parents will look like
619 # they're descendents.
619 # they're descendents.
620 for r in xrange(max(lowestrev, 0), highestrev + 1):
620 for r in xrange(max(lowestrev, 0), highestrev + 1):
621 n = self.node(r)
621 n = self.node(r)
622 isdescendent = False
622 isdescendent = False
623 if lowestrev == -1: # Everybody is a descendent of nullid
623 if lowestrev == -1: # Everybody is a descendent of nullid
624 isdescendent = True
624 isdescendent = True
625 elif n in descendents:
625 elif n in descendents:
626 # n is already a descendent
626 # n is already a descendent
627 isdescendent = True
627 isdescendent = True
628 # This check only needs to be done here because all the roots
628 # This check only needs to be done here because all the roots
629 # will start being marked is descendents before the loop.
629 # will start being marked is descendents before the loop.
630 if n in roots:
630 if n in roots:
631 # If n was a root, check if it's a 'real' root.
631 # If n was a root, check if it's a 'real' root.
632 p = tuple(self.parents(n))
632 p = tuple(self.parents(n))
633 # If any of its parents are descendents, it's not a root.
633 # If any of its parents are descendents, it's not a root.
634 if (p[0] in descendents) or (p[1] in descendents):
634 if (p[0] in descendents) or (p[1] in descendents):
635 roots.pop(n)
635 roots.pop(n)
636 else:
636 else:
637 p = tuple(self.parents(n))
637 p = tuple(self.parents(n))
638 # A node is a descendent if either of its parents are
638 # A node is a descendent if either of its parents are
639 # descendents. (We seeded the dependents list with the roots
639 # descendents. (We seeded the dependents list with the roots
640 # up there, remember?)
640 # up there, remember?)
641 if (p[0] in descendents) or (p[1] in descendents):
641 if (p[0] in descendents) or (p[1] in descendents):
642 descendents[n] = 1
642 descendents[n] = 1
643 isdescendent = True
643 isdescendent = True
644 if isdescendent and ((ancestors is None) or (n in ancestors)):
644 if isdescendent and ((ancestors is None) or (n in ancestors)):
645 # Only include nodes that are both descendents and ancestors.
645 # Only include nodes that are both descendents and ancestors.
646 orderedout.append(n)
646 orderedout.append(n)
647 if (ancestors is not None) and (n in heads):
647 if (ancestors is not None) and (n in heads):
648 # We're trying to figure out which heads are reachable
648 # We're trying to figure out which heads are reachable
649 # from roots.
649 # from roots.
650 # Mark this head as having been reached
650 # Mark this head as having been reached
651 heads[n] = 1
651 heads[n] = 1
652 elif ancestors is None:
652 elif ancestors is None:
653 # Otherwise, we're trying to discover the heads.
653 # Otherwise, we're trying to discover the heads.
654 # Assume this is a head because if it isn't, the next step
654 # Assume this is a head because if it isn't, the next step
655 # will eventually remove it.
655 # will eventually remove it.
656 heads[n] = 1
656 heads[n] = 1
657 # But, obviously its parents aren't.
657 # But, obviously its parents aren't.
658 for p in self.parents(n):
658 for p in self.parents(n):
659 heads.pop(p, None)
659 heads.pop(p, None)
660 heads = [n for n in heads.iterkeys() if heads[n] != 0]
660 heads = [n for n in heads.iterkeys() if heads[n] != 0]
661 roots = roots.keys()
661 roots = roots.keys()
662 assert orderedout
662 assert orderedout
663 assert roots
663 assert roots
664 assert heads
664 assert heads
665 return (orderedout, roots, heads)
665 return (orderedout, roots, heads)
666
666
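    # A hedged sketch of the tuple returned above (the names 'rl', 'r' and
    # 'h' are hypothetical, not part of this module):
    #   orderedout, roots, heads = rl.nodesbetween([r], [h])
    # 'orderedout' lists, in topological order, every node that is both a
    # descendent of a surviving root and an ancestor of a reachable head;
    # 'roots' and 'heads' are the subsets of the inputs that survived the
    # filtering performed above.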
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        """
        if start is None:
            start = nullid
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

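    # Illustrative use of heads() (a sketch; 'rl' and 'node' are
    # hypothetical):
    #   rl.heads()           # all childless nodes in this revlog
    #   rl.heads(start=node) # only heads that descend from 'node'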
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    break
        return c

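    # Illustrative use of children() (a sketch; 'rl' is hypothetical): in a
    # strictly linear revlog each node has exactly one child except the
    # tip, so
    #   rl.children(rl.node(0)) # -> [rl.node(1)]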
    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

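    # Illustrative use of lookup() (a sketch; 'rl' is hypothetical):
    #   rl.lookup("0")      # node of revision 0
    #   rl.lookup("-1")     # negative numbers count back from the tip
    #   rl.lookup("1e4f2a") # any unambiguous hex prefix of a nodeid
    # An ambiguous prefix or a miss raises RevlogError.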
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

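    # The two helpers above are inverses of each other; a hedged sketch
    # ('old' and 'new' are hypothetical strings):
    #   delta = rl.diff(old, new)
    #   assert rl.patches(old, [delta]) == new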
    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # at least 4k by default
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

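    # A note on the inline offset above (a sketch): with inline data the
    # compressed chunk for revision rev is stored in the index file itself,
    # after rev + 1 fixed-size index entries, hence the extra
    # (rev + 1) * struct.calcsize(self.indexformat) added to 'start'.  The
    # cache keeps a window of at least 'cachelen' bytes so consecutive
    # chunk reads along a delta chain usually hit memory, not the file.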
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

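    # A note on the fast path above (a sketch): when rev2 immediately
    # follows rev1 and both share a delta-chain base, the chunk stored for
    # rev2 already is the delta rev1 -> rev2, so it can be returned without
    # diffing the two reconstructed texts.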
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

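    # Reconstruction sketch for revision() above: the text is the full
    # version at the chain base patched with every stored delta up to the
    # requested revision, roughly
    #   text = self.chunk(base)
    #   text = self.patches(text,
    #                       [self.chunk(r) for r in xrange(base + 1, rev + 1)])
    # with the (node, rev, text) cache letting us start partway up a chain.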
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

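    # Migration sketch for checkinlinesize() above: once an inline index
    # grows past 131072 bytes (128 KiB), the revision data is copied out to
    # the separate data file, the REVLOGNGINLINEDATA flag is cleared from
    # self.version, and a fresh index is written through an atomic temp
    # file that only takes effect on rename().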
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            f = self.opener(self.datafile, "a")
            if data[0]:
                f.write(data[0])
            f.write(data[1])
            f.close()
            f = self.opener(self.indexfile, "a")
        else:
            f = self.opener(self.indexfile, "a+")
            f.seek(0, 2)
            transaction.add(self.indexfile, f.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            f.write(l)
            entry = entry[4:]

        f.write(entry)

        if self.inlinedata():
            f.write(data[0])
            f.write(data[1])
            self.checkinlinesize(transaction, f)

        self.cache = (node, n, text)
        return node

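    # A worked instance of the heuristic above (hypothetical numbers): if
    # the delta chain from the base spans dist = 5000 bytes but the new
    # full text is only len(text) = 2000 bytes, then dist > len(text) * 2,
    # so a fresh full version is stored and the chain restarts here.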
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # start with some short cuts for the linear cases
        if a == b:
            return a
        ra = self.rev(a)
        rb = self.rev(b)
        if ra < rb:
            last = b
            first = a
        else:
            last = a
            first = b

        # reachable won't include stop in the list, so we have to use a parent
        reachable = self.reachable(last, stop=self.parents(first)[0])
        if first in reachable:
            return first

        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

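    # LCA sketch for ancestor() above: both iterators yield generations in
    # decreasing distance from root; the loop advances whichever side is
    # currently farther from the root until one generation contains a node
    # common to both, e.g. for
    #   a --- b --- d
    #          \
    #           -- e
    # ancestor(d, e) returns b, the deepest node reachable from both.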
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes.  The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets.  parent is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

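    # Wire-format sketch for the chunks yielded above: each data chunk is
    # an 80-byte header -- node, p1, p2 and the changeset node returned by
    # lookup(), 20 bytes each -- followed by the delta against the previous
    # revision in the group; addgroup() below unpacks exactly this layout.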
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        Given a set of deltas, add them to the revision log.  The
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if chain not in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    if not dfh:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        if node is None:
            raise RevlogError(_("group to be added is empty"))
        return node

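    # Illustrative call of addgroup() (a hedged sketch; 'cl', 'chunks' and
    # 'tr' are hypothetical):
    #   def linkmapper(cs):
    #       return cl.rev(cs) # map a changeset node to its linkrev
    #   node = rl.addgroup(chunks, linkmapper, tr)
    # where 'chunks' yields data in the layout produced by group() above.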
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

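    # Semantics sketch for strip() above: truncation starts at the first
    # revision at or after 'rev' whose linkrev (four fields from the end of
    # an index entry in both index formats) is at least 'minlink'; both the
    # on-disk files and the in-memory index, nodemap and caches are pruned.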
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

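    # Interpretation sketch for checksize() above: the returned (dd, di)
    # are the differences between actual and expected sizes of the data
    # file and index file respectively; (0, 0) means both files match the
    # index, and a missing file simply contributes 0.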