##// END OF EJS Templates
Merge with cmason
Thomas Arendsen Hein -
r2099:cd7cb896 merge default
parent child Browse files
Show More
@@ -1,1978 +1,1977 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
15
15
16 class localrepository(object):
16 class localrepository(object):
17 def __del__(self):
17 def __del__(self):
18 self.transhandle = None
18 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the local repository rooted at path.

        parentui: ui object whose configuration is inherited.
        path: repository root directory; when None, search upwards
              from the current directory for a ".hg" directory.
        create: when true, create a new repository at path.

        Raises repo.RepoError when no repository can be found or the
        given path does not contain one.
        """
        if not path:
            # walk up from the cwd until a directory containing ".hg"
            # is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # files under .hg
        self.wopener = util.opener(self.root)   # working directory files

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without a per-repo hgrc is perfectly valid
            pass

        # revlog format version and flags from the [revlog] config section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', 0))
        flags = 0
        for x in v.get('flags', "").split():
            flags |= revlog.flagstr(x)

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags(), nodetags(), wread(), wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
    def hook(self, name, throw=False, **args):
        """Run every configured hook matching name.

        Hook commands come from the [hooks] config section; each item
        whose name (before an optional ".suffix") equals name and has a
        non-empty command is run, in sorted order.  The keyword
        arguments are exported to the hook's environment both with an
        HG_ prefix and plain (both upper-cased).

        When throw is true, a failing hook raises util.Abort; otherwise
        only a warning is printed.  Returns True iff all hooks
        succeeded (exit status 0).
        """
        def runhook(name, cmd):
            # run one hook command and report success/failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            # run every hook even after a failure, but remember failures
            r = runhook(hname, cmd) and r
        return r
95
95
96 def tags(self):
96 def tags(self):
97 '''return a mapping of tag to node'''
97 '''return a mapping of tag to node'''
98 if not self.tagscache:
98 if not self.tagscache:
99 self.tagscache = {}
99 self.tagscache = {}
100
100
101 def parsetag(line, context):
101 def parsetag(line, context):
102 if not line:
102 if not line:
103 return
103 return
104 s = l.split(" ", 1)
104 s = l.split(" ", 1)
105 if len(s) != 2:
105 if len(s) != 2:
106 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
106 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
107 return
107 return
108 node, key = s
108 node, key = s
109 try:
109 try:
110 bin_n = bin(node)
110 bin_n = bin(node)
111 except TypeError:
111 except TypeError:
112 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
112 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
113 return
113 return
114 if bin_n not in self.changelog.nodemap:
114 if bin_n not in self.changelog.nodemap:
115 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
115 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
116 return
116 return
117 self.tagscache[key.strip()] = bin_n
117 self.tagscache[key.strip()] = bin_n
118
118
119 # read each head of the tags file, ending with the tip
119 # read each head of the tags file, ending with the tip
120 # and add each tag found to the map, with "newer" ones
120 # and add each tag found to the map, with "newer" ones
121 # taking precedence
121 # taking precedence
122 fl = self.file(".hgtags")
122 fl = self.file(".hgtags")
123 h = fl.heads()
123 h = fl.heads()
124 h.reverse()
124 h.reverse()
125 for r in h:
125 for r in h:
126 count = 0
126 count = 0
127 for l in fl.read(r).splitlines():
127 for l in fl.read(r).splitlines():
128 count += 1
128 count += 1
129 parsetag(l, ".hgtags:%d" % count)
129 parsetag(l, ".hgtags:%d" % count)
130
130
131 try:
131 try:
132 f = self.opener("localtags")
132 f = self.opener("localtags")
133 count = 0
133 count = 0
134 for l in f:
134 for l in f:
135 count += 1
135 count += 1
136 parsetag(l, "localtags:%d" % count)
136 parsetag(l, "localtags:%d" % count)
137 except IOError:
137 except IOError:
138 pass
138 pass
139
139
140 self.tagscache['tip'] = self.changelog.tip()
140 self.tagscache['tip'] = self.changelog.tip()
141
141
142 return self.tagscache
142 return self.tagscache
143
143
144 def tagslist(self):
144 def tagslist(self):
145 '''return a list of tags ordered by revision'''
145 '''return a list of tags ordered by revision'''
146 l = []
146 l = []
147 for t, n in self.tags().items():
147 for t, n in self.tags().items():
148 try:
148 try:
149 r = self.changelog.rev(n)
149 r = self.changelog.rev(n)
150 except:
150 except:
151 r = -2 # sort to the beginning of the list if unknown
151 r = -2 # sort to the beginning of the list if unknown
152 l.append((r, t, n))
152 l.append((r, t, n))
153 l.sort()
153 l.sort()
154 return [(t, n) for r, t, n in l]
154 return [(t, n) for r, t, n in l]
155
155
156 def nodetags(self, node):
156 def nodetags(self, node):
157 '''return the tags associated with a node'''
157 '''return the tags associated with a node'''
158 if not self.nodetagscache:
158 if not self.nodetagscache:
159 self.nodetagscache = {}
159 self.nodetagscache = {}
160 for t, n in self.tags().items():
160 for t, n in self.tags().items():
161 self.nodetagscache.setdefault(n, []).append(t)
161 self.nodetagscache.setdefault(n, []).append(t)
162 return self.nodetagscache.get(node, [])
162 return self.nodetagscache.get(node, [])
163
163
    def lookup(self, key):
        """Resolve key (tag name, or anything the changelog can look
        up, e.g. a revision number or hash prefix) to a binary node.

        Tags take precedence over changelog lookups.  Raises
        repo.RepoError when the key cannot be resolved.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # the changelog may raise a variety of errors for a bad
                # key; translate them all into a single RepoError
                raise repo.RepoError(_("unknown revision '%s'") % key)
173
172
174 def dev(self):
173 def dev(self):
175 return os.stat(self.path).st_dev
174 return os.stat(self.path).st_dev
176
175
177 def local(self):
176 def local(self):
178 return True
177 return True
179
178
180 def join(self, f):
179 def join(self, f):
181 return os.path.join(self.path, f)
180 return os.path.join(self.path, f)
182
181
183 def wjoin(self, f):
182 def wjoin(self, f):
184 return os.path.join(self.root, f)
183 return os.path.join(self.root, f)
185
184
186 def file(self, f):
185 def file(self, f):
187 if f[0] == '/':
186 if f[0] == '/':
188 f = f[1:]
187 f = f[1:]
189 return filelog.filelog(self.opener, f, self.revlogversion)
188 return filelog.filelog(self.opener, f, self.revlogversion)
190
189
    def getcwd(self):
        """Return the current working directory as tracked by the
        dirstate (presumably relative to the repo root -- delegated
        entirely to dirstate.getcwd)."""
        return self.dirstate.getcwd()
193
192
194 def wfile(self, f, mode='r'):
193 def wfile(self, f, mode='r'):
195 return self.wopener(f, mode)
194 return self.wopener(f, mode)
196
195
197 def wread(self, filename):
196 def wread(self, filename):
198 if self.encodepats == None:
197 if self.encodepats == None:
199 l = []
198 l = []
200 for pat, cmd in self.ui.configitems("encode"):
199 for pat, cmd in self.ui.configitems("encode"):
201 mf = util.matcher(self.root, "", [pat], [], [])[1]
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
202 l.append((mf, cmd))
201 l.append((mf, cmd))
203 self.encodepats = l
202 self.encodepats = l
204
203
205 data = self.wopener(filename, 'r').read()
204 data = self.wopener(filename, 'r').read()
206
205
207 for mf, cmd in self.encodepats:
206 for mf, cmd in self.encodepats:
208 if mf(filename):
207 if mf(filename):
209 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
208 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
210 data = util.filter(data, cmd)
209 data = util.filter(data, cmd)
211 break
210 break
212
211
213 return data
212 return data
214
213
215 def wwrite(self, filename, data, fd=None):
214 def wwrite(self, filename, data, fd=None):
216 if self.decodepats == None:
215 if self.decodepats == None:
217 l = []
216 l = []
218 for pat, cmd in self.ui.configitems("decode"):
217 for pat, cmd in self.ui.configitems("decode"):
219 mf = util.matcher(self.root, "", [pat], [], [])[1]
218 mf = util.matcher(self.root, "", [pat], [], [])[1]
220 l.append((mf, cmd))
219 l.append((mf, cmd))
221 self.decodepats = l
220 self.decodepats = l
222
221
223 for mf, cmd in self.decodepats:
222 for mf, cmd in self.decodepats:
224 if mf(filename):
223 if mf(filename):
225 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
224 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
226 data = util.filter(data, cmd)
225 data = util.filter(data, cmd)
227 break
226 break
228
227
229 if fd:
228 if fd:
230 return fd.write(data)
229 return fd.write(data)
231 return self.wopener(filename, 'w').write(data)
230 return self.wopener(filename, 'w').write(data)
232
231
233 def transaction(self):
232 def transaction(self):
234 tr = self.transhandle
233 tr = self.transhandle
235 if tr != None and tr.running():
234 if tr != None and tr.running():
236 return tr.nest()
235 return tr.nest()
237
236
238 # save dirstate for undo
237 # save dirstate for undo
239 try:
238 try:
240 ds = self.opener("dirstate").read()
239 ds = self.opener("dirstate").read()
241 except IOError:
240 except IOError:
242 ds = ""
241 ds = ""
243 self.opener("journal.dirstate", "w").write(ds)
242 self.opener("journal.dirstate", "w").write(ds)
244
243
245 tr = transaction.transaction(self.ui.warn, self.opener,
244 tr = transaction.transaction(self.ui.warn, self.opener,
246 self.join("journal"),
245 self.join("journal"),
247 aftertrans(self.path))
246 aftertrans(self.path))
248 self.transhandle = tr
247 self.transhandle = tr
249 return tr
248 return tr
250
249
251 def recover(self):
250 def recover(self):
252 l = self.lock()
251 l = self.lock()
253 if os.path.exists(self.join("journal")):
252 if os.path.exists(self.join("journal")):
254 self.ui.status(_("rolling back interrupted transaction\n"))
253 self.ui.status(_("rolling back interrupted transaction\n"))
255 transaction.rollback(self.opener, self.join("journal"))
254 transaction.rollback(self.opener, self.join("journal"))
256 self.reload()
255 self.reload()
257 return True
256 return True
258 else:
257 else:
259 self.ui.warn(_("no interrupted transaction available\n"))
258 self.ui.warn(_("no interrupted transaction available\n"))
260 return False
259 return False
261
260
262 def undo(self, wlock=None):
261 def undo(self, wlock=None):
263 if not wlock:
262 if not wlock:
264 wlock = self.wlock()
263 wlock = self.wlock()
265 l = self.lock()
264 l = self.lock()
266 if os.path.exists(self.join("undo")):
265 if os.path.exists(self.join("undo")):
267 self.ui.status(_("rolling back last transaction\n"))
266 self.ui.status(_("rolling back last transaction\n"))
268 transaction.rollback(self.opener, self.join("undo"))
267 transaction.rollback(self.opener, self.join("undo"))
269 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
268 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
270 self.reload()
269 self.reload()
271 self.wreload()
270 self.wreload()
272 else:
271 else:
273 self.ui.warn(_("no undo information available\n"))
272 self.ui.warn(_("no undo information available\n"))
274
273
275 def wreload(self):
274 def wreload(self):
276 self.dirstate.read()
275 self.dirstate.read()
277
276
278 def reload(self):
277 def reload(self):
279 self.changelog.load()
278 self.changelog.load()
280 self.manifest.load()
279 self.manifest.load()
281 self.tagscache = None
280 self.tagscache = None
282 self.nodetagscache = None
281 self.nodetagscache = None
283
282
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file named lockname and return the lock.

        wait: when false, a held lock raises lock.LockHeld at once;
        otherwise we warn and retry with a timeout taken from the
        ui.timeout config option (default 600 seconds).
        releasefn: callback run when the lock is released.
        acquirefn: callback run right after the lock is acquired.
        desc: human-readable description used in messages.
        """
        try:
            # first try a non-blocking acquire
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
300
299
301 def lock(self, wait=1):
300 def lock(self, wait=1):
302 return self.do_lock("lock", wait, acquirefn=self.reload,
301 return self.do_lock("lock", wait, acquirefn=self.reload,
303 desc=_('repository %s') % self.origroot)
302 desc=_('repository %s') % self.origroot)
304
303
305 def wlock(self, wait=1):
304 def wlock(self, wait=1):
306 return self.do_lock("wlock", wait, self.dirstate.write,
305 return self.do_lock("wlock", wait, self.dirstate.write,
307 self.wreload,
306 self.wreload,
308 desc=_('working directory of %s') % self.origroot)
307 desc=_('working directory of %s') % self.origroot)
309
308
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """determine whether a new filenode is needed

        Returns (entry, None, None) when the file is unchanged from a
        single parent (reuse the existing filenode entry), or
        (None, fp1, fp2) when a new filenode with parents fp1/fp2
        must be created.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only fp2 as parent
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                # fp2 is an ancestor of fp1: keep only fp1 as parent
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
328
327
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the usual
        working-directory checks.

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when we are committing on top of the first dirstate
        parent (otherwise the working dir does not correspond to the
        new changeset).
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on the first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged from a parent: reuse its filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working dir: drop it from the
                # manifest being built
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
384
383
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit changes to the repository.

        files: explicit list of files to commit; when None, commit
               everything reported changed by self.changes(match).
        text: commit message; when empty, the user's editor is invoked.
        user/date: committer identity and timestamp (defaulted by the
               changelog / ui when not given).
        force: allow committing even when nothing changed.
        lock/wlock: already-held locks, acquired here when not given.

        Runs the precommit, pretxncommit and commit hooks.  Returns the
        new changeset node, or None when nothing was committed.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly named files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is always committed, even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and the revision it was copied from
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # build a commit-message template and run the user's editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
504
503
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (src, filename) pairs for matched files.

        With node, walk that revision's manifest: yields 'm' entries
        for manifest files accepted by match, and 'b' entries for
        requested files missing from the revision but accepted by
        badmatch (others produce a warning).  Without node, delegate
        to the dirstate walk over the working directory.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # drop manifest files from fdict, so the leftovers are
                # exactly the requested files absent from this revision
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
522
521
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of file names; with show_ignored, an ignored list
        is appended.
        """

        def fcmp(fn, mf):
            # compare working-directory contents against the stored
            # filelog revision recorded in manifest mf
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: proceed without the lock, but then we
                    # must not write dirstate updates below
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents unchanged: refresh the dirstate
                            # entry so we skip the compare next time
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    # "" marks working-dir pseudo-entries: always compare
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
605
604
606 def add(self, list, wlock=None):
605 def add(self, list, wlock=None):
607 if not wlock:
606 if not wlock:
608 wlock = self.wlock()
607 wlock = self.wlock()
609 for f in list:
608 for f in list:
610 p = self.wjoin(f)
609 p = self.wjoin(f)
611 if not os.path.exists(p):
610 if not os.path.exists(p):
612 self.ui.warn(_("%s does not exist!\n") % f)
611 self.ui.warn(_("%s does not exist!\n") % f)
613 elif not os.path.isfile(p):
612 elif not os.path.isfile(p):
614 self.ui.warn(_("%s not added: only files supported currently\n")
613 self.ui.warn(_("%s not added: only files supported currently\n")
615 % f)
614 % f)
616 elif self.dirstate.state(f) in 'an':
615 elif self.dirstate.state(f) in 'an':
617 self.ui.warn(_("%s already tracked!\n") % f)
616 self.ui.warn(_("%s already tracked!\n") % f)
618 else:
617 else:
619 self.dirstate.update([f], "a")
618 self.dirstate.update([f], "a")
620
619
621 def forget(self, list, wlock=None):
620 def forget(self, list, wlock=None):
622 if not wlock:
621 if not wlock:
623 wlock = self.wlock()
622 wlock = self.wlock()
624 for f in list:
623 for f in list:
625 if self.dirstate.state(f) not in 'ai':
624 if self.dirstate.state(f) not in 'ai':
626 self.ui.warn(_("%s not added!\n") % f)
625 self.ui.warn(_("%s not added!\n") % f)
627 else:
626 else:
628 self.dirstate.forget([f])
627 self.dirstate.forget([f])
629
628
630 def remove(self, list, unlink=False, wlock=None):
629 def remove(self, list, unlink=False, wlock=None):
631 if unlink:
630 if unlink:
632 for f in list:
631 for f in list:
633 try:
632 try:
634 util.unlink(self.wjoin(f))
633 util.unlink(self.wjoin(f))
635 except OSError, inst:
634 except OSError, inst:
636 if inst.errno != errno.ENOENT:
635 if inst.errno != errno.ENOENT:
637 raise
636 raise
638 if not wlock:
637 if not wlock:
639 wlock = self.wlock()
638 wlock = self.wlock()
640 for f in list:
639 for f in list:
641 p = self.wjoin(f)
640 p = self.wjoin(f)
642 if os.path.exists(p):
641 if os.path.exists(p):
643 self.ui.warn(_("%s still exists!\n") % f)
642 self.ui.warn(_("%s still exists!\n") % f)
644 elif self.dirstate.state(f) == 'a':
643 elif self.dirstate.state(f) == 'a':
645 self.dirstate.forget([f])
644 self.dirstate.forget([f])
646 elif f not in self.dirstate:
645 elif f not in self.dirstate:
647 self.ui.warn(_("%s not tracked!\n") % f)
646 self.ui.warn(_("%s not tracked!\n") % f)
648 else:
647 else:
649 self.dirstate.update([f], "r")
648 self.dirstate.update([f], "r")
650
649
651 def undelete(self, list, wlock=None):
650 def undelete(self, list, wlock=None):
652 p = self.dirstate.parents()[0]
651 p = self.dirstate.parents()[0]
653 mn = self.changelog.read(p)[0]
652 mn = self.changelog.read(p)[0]
654 mf = self.manifest.readflags(mn)
653 mf = self.manifest.readflags(mn)
655 m = self.manifest.read(mn)
654 m = self.manifest.read(mn)
656 if not wlock:
655 if not wlock:
657 wlock = self.wlock()
656 wlock = self.wlock()
658 for f in list:
657 for f in list:
659 if self.dirstate.state(f) not in "r":
658 if self.dirstate.state(f) not in "r":
660 self.ui.warn("%s not removed!\n" % f)
659 self.ui.warn("%s not removed!\n" % f)
661 else:
660 else:
662 t = self.file(f).read(m[f])
661 t = self.file(f).read(m[f])
663 self.wwrite(f, t)
662 self.wwrite(f, t)
664 util.set_exec(self.wjoin(f), mf[f])
663 util.set_exec(self.wjoin(f), mf[f])
665 self.dirstate.update([f], "n")
664 self.dirstate.update([f], "n")
666
665
667 def copy(self, source, dest, wlock=None):
666 def copy(self, source, dest, wlock=None):
668 p = self.wjoin(dest)
667 p = self.wjoin(dest)
669 if not os.path.exists(p):
668 if not os.path.exists(p):
670 self.ui.warn(_("%s does not exist!\n") % dest)
669 self.ui.warn(_("%s does not exist!\n") % dest)
671 elif not os.path.isfile(p):
670 elif not os.path.isfile(p):
672 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
671 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
673 else:
672 else:
674 if not wlock:
673 if not wlock:
675 wlock = self.wlock()
674 wlock = self.wlock()
676 if self.dirstate.state(dest) == '?':
675 if self.dirstate.state(dest) == '?':
677 self.dirstate.update([dest], "a")
676 self.dirstate.update([dest], "a")
678 self.dirstate.copy(source, dest)
677 self.dirstate.copy(source, dest)
679
678
680 def heads(self, start=None):
679 def heads(self, start=None):
681 heads = self.changelog.heads(start)
680 heads = self.changelog.heads(start)
682 # sort the output in rev descending order
681 # sort the output in rev descending order
683 heads = [(-self.changelog.rev(h), h) for h in heads]
682 heads = [(-self.changelog.rev(h), h) for h in heads]
684 heads.sort()
683 heads.sort()
685 return [n for (r, n) in heads]
684 return [n for (r, n) in heads]
686
685
687 # branchlookup returns a dict giving a list of branches for
686 # branchlookup returns a dict giving a list of branches for
688 # each head. A branch is defined as the tag of a node or
687 # each head. A branch is defined as the tag of a node or
689 # the branch of the node's parents. If a node has multiple
688 # the branch of the node's parents. If a node has multiple
690 # branch tags, tags are eliminated if they are visible from other
689 # branch tags, tags are eliminated if they are visible from other
691 # branch tags.
690 # branch tags.
692 #
691 #
693 # So, for this graph: a->b->c->d->e
692 # So, for this graph: a->b->c->d->e
694 # \ /
693 # \ /
695 # aa -----/
694 # aa -----/
696 # a has tag 2.6.12
695 # a has tag 2.6.12
697 # d has tag 2.6.13
696 # d has tag 2.6.13
698 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
697 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
699 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
698 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
700 # from the list.
699 # from the list.
701 #
700 #
702 # It is possible that more than one head will have the same branch tag.
701 # It is possible that more than one head will have the same branch tag.
703 # callers need to check the result for multiple heads under the same
702 # callers need to check the result for multiple heads under the same
704 # branch tag if that is a problem for them (ie checkout of a specific
703 # branch tag if that is a problem for them (ie checkout of a specific
705 # branch).
704 # branch).
706 #
705 #
707 # passing in a specific branch will limit the depth of the search
706 # passing in a specific branch will limit the depth of the search
708 # through the parents. It won't limit the branches returned in the
707 # through the parents. It won't limit the branches returned in the
709 # result though.
708 # result though.
710 def branchlookup(self, heads=None, branch=None):
709 def branchlookup(self, heads=None, branch=None):
711 if not heads:
710 if not heads:
712 heads = self.heads()
711 heads = self.heads()
713 headt = [ h for h in heads ]
712 headt = [ h for h in heads ]
714 chlog = self.changelog
713 chlog = self.changelog
715 branches = {}
714 branches = {}
716 merges = []
715 merges = []
717 seenmerge = {}
716 seenmerge = {}
718
717
719 # traverse the tree once for each head, recording in the branches
718 # traverse the tree once for each head, recording in the branches
720 # dict which tags are visible from this head. The branches
719 # dict which tags are visible from this head. The branches
721 # dict also records which tags are visible from each tag
720 # dict also records which tags are visible from each tag
722 # while we traverse.
721 # while we traverse.
723 while headt or merges:
722 while headt or merges:
724 if merges:
723 if merges:
725 n, found = merges.pop()
724 n, found = merges.pop()
726 visit = [n]
725 visit = [n]
727 else:
726 else:
728 h = headt.pop()
727 h = headt.pop()
729 visit = [h]
728 visit = [h]
730 found = [h]
729 found = [h]
731 seen = {}
730 seen = {}
732 while visit:
731 while visit:
733 n = visit.pop()
732 n = visit.pop()
734 if n in seen:
733 if n in seen:
735 continue
734 continue
736 pp = chlog.parents(n)
735 pp = chlog.parents(n)
737 tags = self.nodetags(n)
736 tags = self.nodetags(n)
738 if tags:
737 if tags:
739 for x in tags:
738 for x in tags:
740 if x == 'tip':
739 if x == 'tip':
741 continue
740 continue
742 for f in found:
741 for f in found:
743 branches.setdefault(f, {})[n] = 1
742 branches.setdefault(f, {})[n] = 1
744 branches.setdefault(n, {})[n] = 1
743 branches.setdefault(n, {})[n] = 1
745 break
744 break
746 if n not in found:
745 if n not in found:
747 found.append(n)
746 found.append(n)
748 if branch in tags:
747 if branch in tags:
749 continue
748 continue
750 seen[n] = 1
749 seen[n] = 1
751 if pp[1] != nullid and n not in seenmerge:
750 if pp[1] != nullid and n not in seenmerge:
752 merges.append((pp[1], [x for x in found]))
751 merges.append((pp[1], [x for x in found]))
753 seenmerge[n] = 1
752 seenmerge[n] = 1
754 if pp[0] != nullid:
753 if pp[0] != nullid:
755 visit.append(pp[0])
754 visit.append(pp[0])
756 # traverse the branches dict, eliminating branch tags from each
755 # traverse the branches dict, eliminating branch tags from each
757 # head that are visible from another branch tag for that head.
756 # head that are visible from another branch tag for that head.
758 out = {}
757 out = {}
759 viscache = {}
758 viscache = {}
760 for h in heads:
759 for h in heads:
761 def visible(node):
760 def visible(node):
762 if node in viscache:
761 if node in viscache:
763 return viscache[node]
762 return viscache[node]
764 ret = {}
763 ret = {}
765 visit = [node]
764 visit = [node]
766 while visit:
765 while visit:
767 x = visit.pop()
766 x = visit.pop()
768 if x in viscache:
767 if x in viscache:
769 ret.update(viscache[x])
768 ret.update(viscache[x])
770 elif x not in ret:
769 elif x not in ret:
771 ret[x] = 1
770 ret[x] = 1
772 if x in branches:
771 if x in branches:
773 visit[len(visit):] = branches[x].keys()
772 visit[len(visit):] = branches[x].keys()
774 viscache[node] = ret
773 viscache[node] = ret
775 return ret
774 return ret
776 if h not in branches:
775 if h not in branches:
777 continue
776 continue
778 # O(n^2), but somewhat limited. This only searches the
777 # O(n^2), but somewhat limited. This only searches the
779 # tags visible from a specific head, not all the tags in the
778 # tags visible from a specific head, not all the tags in the
780 # whole repo.
779 # whole repo.
781 for b in branches[h]:
780 for b in branches[h]:
782 vis = False
781 vis = False
783 for bb in branches[h].keys():
782 for bb in branches[h].keys():
784 if b != bb:
783 if b != bb:
785 if b in visible(bb):
784 if b in visible(bb):
786 vis = True
785 vis = True
787 break
786 break
788 if not vis:
787 if not vis:
789 l = out.setdefault(h, [])
788 l = out.setdefault(h, [])
790 l[len(l):] = self.nodetags(b)
789 l[len(l):] = self.nodetags(b)
791 return out
790 return out
792
791
793 def branches(self, nodes):
792 def branches(self, nodes):
794 if not nodes:
793 if not nodes:
795 nodes = [self.changelog.tip()]
794 nodes = [self.changelog.tip()]
796 b = []
795 b = []
797 for n in nodes:
796 for n in nodes:
798 t = n
797 t = n
799 while n:
798 while n:
800 p = self.changelog.parents(n)
799 p = self.changelog.parents(n)
801 if p[1] != nullid or p[0] == nullid:
800 if p[1] != nullid or p[0] == nullid:
802 b.append((t, n, p[0], p[1]))
801 b.append((t, n, p[0], p[1]))
803 break
802 break
804 n = p[0]
803 n = p[0]
805 return b
804 return b
806
805
807 def between(self, pairs):
806 def between(self, pairs):
808 r = []
807 r = []
809
808
810 for top, bottom in pairs:
809 for top, bottom in pairs:
811 n, l, i = top, [], 0
810 n, l, i = top, [], 0
812 f = 1
811 f = 1
813
812
814 while n != bottom:
813 while n != bottom:
815 p = self.changelog.parents(n)[0]
814 p = self.changelog.parents(n)[0]
816 if i == f:
815 if i == f:
817 l.append(n)
816 l.append(n)
818 f = f * 2
817 f = f * 2
819 n = p
818 n = p
820 i += 1
819 i += 1
821
820
822 r.append(l)
821 r.append(l)
823
822
824 return r
823 return r
825
824
826 def findincoming(self, remote, base=None, heads=None, force=False):
825 def findincoming(self, remote, base=None, heads=None, force=False):
827 m = self.changelog.nodemap
826 m = self.changelog.nodemap
828 search = []
827 search = []
829 fetch = {}
828 fetch = {}
830 seen = {}
829 seen = {}
831 seenbranch = {}
830 seenbranch = {}
832 if base == None:
831 if base == None:
833 base = {}
832 base = {}
834
833
835 # assume we're closer to the tip than the root
834 # assume we're closer to the tip than the root
836 # and start by examining the heads
835 # and start by examining the heads
837 self.ui.status(_("searching for changes\n"))
836 self.ui.status(_("searching for changes\n"))
838
837
839 if not heads:
838 if not heads:
840 heads = remote.heads()
839 heads = remote.heads()
841
840
842 unknown = []
841 unknown = []
843 for h in heads:
842 for h in heads:
844 if h not in m:
843 if h not in m:
845 unknown.append(h)
844 unknown.append(h)
846 else:
845 else:
847 base[h] = 1
846 base[h] = 1
848
847
849 if not unknown:
848 if not unknown:
850 return []
849 return []
851
850
852 rep = {}
851 rep = {}
853 reqcnt = 0
852 reqcnt = 0
854
853
855 # search through remote branches
854 # search through remote branches
856 # a 'branch' here is a linear segment of history, with four parts:
855 # a 'branch' here is a linear segment of history, with four parts:
857 # head, root, first parent, second parent
856 # head, root, first parent, second parent
858 # (a branch always has two parents (or none) by definition)
857 # (a branch always has two parents (or none) by definition)
859 unknown = remote.branches(unknown)
858 unknown = remote.branches(unknown)
860 while unknown:
859 while unknown:
861 r = []
860 r = []
862 while unknown:
861 while unknown:
863 n = unknown.pop(0)
862 n = unknown.pop(0)
864 if n[0] in seen:
863 if n[0] in seen:
865 continue
864 continue
866
865
867 self.ui.debug(_("examining %s:%s\n")
866 self.ui.debug(_("examining %s:%s\n")
868 % (short(n[0]), short(n[1])))
867 % (short(n[0]), short(n[1])))
869 if n[0] == nullid:
868 if n[0] == nullid:
870 break
869 break
871 if n in seenbranch:
870 if n in seenbranch:
872 self.ui.debug(_("branch already found\n"))
871 self.ui.debug(_("branch already found\n"))
873 continue
872 continue
874 if n[1] and n[1] in m: # do we know the base?
873 if n[1] and n[1] in m: # do we know the base?
875 self.ui.debug(_("found incomplete branch %s:%s\n")
874 self.ui.debug(_("found incomplete branch %s:%s\n")
876 % (short(n[0]), short(n[1])))
875 % (short(n[0]), short(n[1])))
877 search.append(n) # schedule branch range for scanning
876 search.append(n) # schedule branch range for scanning
878 seenbranch[n] = 1
877 seenbranch[n] = 1
879 else:
878 else:
880 if n[1] not in seen and n[1] not in fetch:
879 if n[1] not in seen and n[1] not in fetch:
881 if n[2] in m and n[3] in m:
880 if n[2] in m and n[3] in m:
882 self.ui.debug(_("found new changeset %s\n") %
881 self.ui.debug(_("found new changeset %s\n") %
883 short(n[1]))
882 short(n[1]))
884 fetch[n[1]] = 1 # earliest unknown
883 fetch[n[1]] = 1 # earliest unknown
885 base[n[2]] = 1 # latest known
884 base[n[2]] = 1 # latest known
886 continue
885 continue
887
886
888 for a in n[2:4]:
887 for a in n[2:4]:
889 if a not in rep:
888 if a not in rep:
890 r.append(a)
889 r.append(a)
891 rep[a] = 1
890 rep[a] = 1
892
891
893 seen[n[0]] = 1
892 seen[n[0]] = 1
894
893
895 if r:
894 if r:
896 reqcnt += 1
895 reqcnt += 1
897 self.ui.debug(_("request %d: %s\n") %
896 self.ui.debug(_("request %d: %s\n") %
898 (reqcnt, " ".join(map(short, r))))
897 (reqcnt, " ".join(map(short, r))))
899 for p in range(0, len(r), 10):
898 for p in range(0, len(r), 10):
900 for b in remote.branches(r[p:p+10]):
899 for b in remote.branches(r[p:p+10]):
901 self.ui.debug(_("received %s:%s\n") %
900 self.ui.debug(_("received %s:%s\n") %
902 (short(b[0]), short(b[1])))
901 (short(b[0]), short(b[1])))
903 if b[0] in m:
902 if b[0] in m:
904 self.ui.debug(_("found base node %s\n")
903 self.ui.debug(_("found base node %s\n")
905 % short(b[0]))
904 % short(b[0]))
906 base[b[0]] = 1
905 base[b[0]] = 1
907 elif b[0] not in seen:
906 elif b[0] not in seen:
908 unknown.append(b)
907 unknown.append(b)
909
908
910 # do binary search on the branches we found
909 # do binary search on the branches we found
911 while search:
910 while search:
912 n = search.pop(0)
911 n = search.pop(0)
913 reqcnt += 1
912 reqcnt += 1
914 l = remote.between([(n[0], n[1])])[0]
913 l = remote.between([(n[0], n[1])])[0]
915 l.append(n[1])
914 l.append(n[1])
916 p = n[0]
915 p = n[0]
917 f = 1
916 f = 1
918 for i in l:
917 for i in l:
919 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
918 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
920 if i in m:
919 if i in m:
921 if f <= 2:
920 if f <= 2:
922 self.ui.debug(_("found new branch changeset %s\n") %
921 self.ui.debug(_("found new branch changeset %s\n") %
923 short(p))
922 short(p))
924 fetch[p] = 1
923 fetch[p] = 1
925 base[i] = 1
924 base[i] = 1
926 else:
925 else:
927 self.ui.debug(_("narrowed branch search to %s:%s\n")
926 self.ui.debug(_("narrowed branch search to %s:%s\n")
928 % (short(p), short(i)))
927 % (short(p), short(i)))
929 search.append((p, i))
928 search.append((p, i))
930 break
929 break
931 p, f = i, f * 2
930 p, f = i, f * 2
932
931
933 # sanity check our fetch list
932 # sanity check our fetch list
934 for f in fetch.keys():
933 for f in fetch.keys():
935 if f in m:
934 if f in m:
936 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
935 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
937
936
938 if base.keys() == [nullid]:
937 if base.keys() == [nullid]:
939 if force:
938 if force:
940 self.ui.warn(_("warning: repository is unrelated\n"))
939 self.ui.warn(_("warning: repository is unrelated\n"))
941 else:
940 else:
942 raise util.Abort(_("repository is unrelated"))
941 raise util.Abort(_("repository is unrelated"))
943
942
944 self.ui.note(_("found new changesets starting at ") +
943 self.ui.note(_("found new changesets starting at ") +
945 " ".join([short(f) for f in fetch]) + "\n")
944 " ".join([short(f) for f in fetch]) + "\n")
946
945
947 self.ui.debug(_("%d total queries\n") % reqcnt)
946 self.ui.debug(_("%d total queries\n") % reqcnt)
948
947
949 return fetch.keys()
948 return fetch.keys()
950
949
951 def findoutgoing(self, remote, base=None, heads=None, force=False):
950 def findoutgoing(self, remote, base=None, heads=None, force=False):
952 """Return list of nodes that are roots of subsets not in remote
951 """Return list of nodes that are roots of subsets not in remote
953
952
954 If base dict is specified, assume that these nodes and their parents
953 If base dict is specified, assume that these nodes and their parents
955 exist on the remote side.
954 exist on the remote side.
956 If a list of heads is specified, return only nodes which are heads
955 If a list of heads is specified, return only nodes which are heads
957 or ancestors of these heads, and return a second element which
956 or ancestors of these heads, and return a second element which
958 contains all remote heads which get new children.
957 contains all remote heads which get new children.
959 """
958 """
960 if base == None:
959 if base == None:
961 base = {}
960 base = {}
962 self.findincoming(remote, base, heads, force=force)
961 self.findincoming(remote, base, heads, force=force)
963
962
964 self.ui.debug(_("common changesets up to ")
963 self.ui.debug(_("common changesets up to ")
965 + " ".join(map(short, base.keys())) + "\n")
964 + " ".join(map(short, base.keys())) + "\n")
966
965
967 remain = dict.fromkeys(self.changelog.nodemap)
966 remain = dict.fromkeys(self.changelog.nodemap)
968
967
969 # prune everything remote has from the tree
968 # prune everything remote has from the tree
970 del remain[nullid]
969 del remain[nullid]
971 remove = base.keys()
970 remove = base.keys()
972 while remove:
971 while remove:
973 n = remove.pop(0)
972 n = remove.pop(0)
974 if n in remain:
973 if n in remain:
975 del remain[n]
974 del remain[n]
976 for p in self.changelog.parents(n):
975 for p in self.changelog.parents(n):
977 remove.append(p)
976 remove.append(p)
978
977
979 # find every node whose parents have been pruned
978 # find every node whose parents have been pruned
980 subset = []
979 subset = []
981 # find every remote head that will get new children
980 # find every remote head that will get new children
982 updated_heads = {}
981 updated_heads = {}
983 for n in remain:
982 for n in remain:
984 p1, p2 = self.changelog.parents(n)
983 p1, p2 = self.changelog.parents(n)
985 if p1 not in remain and p2 not in remain:
984 if p1 not in remain and p2 not in remain:
986 subset.append(n)
985 subset.append(n)
987 if heads:
986 if heads:
988 if p1 in heads:
987 if p1 in heads:
989 updated_heads[p1] = True
988 updated_heads[p1] = True
990 if p2 in heads:
989 if p2 in heads:
991 updated_heads[p2] = True
990 updated_heads[p2] = True
992
991
993 # this is the set of all roots we have to push
992 # this is the set of all roots we have to push
994 if heads:
993 if heads:
995 return subset, updated_heads.keys()
994 return subset, updated_heads.keys()
996 else:
995 else:
997 return subset
996 return subset
998
997
999 def pull(self, remote, heads=None, force=False):
998 def pull(self, remote, heads=None, force=False):
1000 l = self.lock()
999 l = self.lock()
1001
1000
1002 # if we have an empty repo, fetch everything
1001 # if we have an empty repo, fetch everything
1003 if self.changelog.tip() == nullid:
1002 if self.changelog.tip() == nullid:
1004 self.ui.status(_("requesting all changes\n"))
1003 self.ui.status(_("requesting all changes\n"))
1005 fetch = [nullid]
1004 fetch = [nullid]
1006 else:
1005 else:
1007 fetch = self.findincoming(remote, force=force)
1006 fetch = self.findincoming(remote, force=force)
1008
1007
1009 if not fetch:
1008 if not fetch:
1010 self.ui.status(_("no changes found\n"))
1009 self.ui.status(_("no changes found\n"))
1011 return 0
1010 return 0
1012
1011
1013 if heads is None:
1012 if heads is None:
1014 cg = remote.changegroup(fetch, 'pull')
1013 cg = remote.changegroup(fetch, 'pull')
1015 else:
1014 else:
1016 cg = remote.changegroupsubset(fetch, heads, 'pull')
1015 cg = remote.changegroupsubset(fetch, heads, 'pull')
1017 return self.addchangegroup(cg)
1016 return self.addchangegroup(cg)
1018
1017
1019 def push(self, remote, force=False, revs=None):
1018 def push(self, remote, force=False, revs=None):
1020 lock = remote.lock()
1019 lock = remote.lock()
1021
1020
1022 base = {}
1021 base = {}
1023 remote_heads = remote.heads()
1022 remote_heads = remote.heads()
1024 inc = self.findincoming(remote, base, remote_heads, force=force)
1023 inc = self.findincoming(remote, base, remote_heads, force=force)
1025 if not force and inc:
1024 if not force and inc:
1026 self.ui.warn(_("abort: unsynced remote changes!\n"))
1025 self.ui.warn(_("abort: unsynced remote changes!\n"))
1027 self.ui.status(_("(did you forget to sync?"
1026 self.ui.status(_("(did you forget to sync?"
1028 " use push -f to force)\n"))
1027 " use push -f to force)\n"))
1029 return 1
1028 return 1
1030
1029
1031 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1030 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1032 if revs is not None:
1031 if revs is not None:
1033 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1032 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1034 else:
1033 else:
1035 bases, heads = update, self.changelog.heads()
1034 bases, heads = update, self.changelog.heads()
1036
1035
1037 if not bases:
1036 if not bases:
1038 self.ui.status(_("no changes found\n"))
1037 self.ui.status(_("no changes found\n"))
1039 return 1
1038 return 1
1040 elif not force:
1039 elif not force:
1041 if revs is not None:
1040 if revs is not None:
1042 updated_heads = {}
1041 updated_heads = {}
1043 for base in msng_cl:
1042 for base in msng_cl:
1044 for parent in self.changelog.parents(base):
1043 for parent in self.changelog.parents(base):
1045 if parent in remote_heads:
1044 if parent in remote_heads:
1046 updated_heads[parent] = True
1045 updated_heads[parent] = True
1047 updated_heads = updated_heads.keys()
1046 updated_heads = updated_heads.keys()
1048 if len(updated_heads) < len(heads):
1047 if len(updated_heads) < len(heads):
1049 self.ui.warn(_("abort: push creates new remote branches!\n"))
1048 self.ui.warn(_("abort: push creates new remote branches!\n"))
1050 self.ui.status(_("(did you forget to merge?"
1049 self.ui.status(_("(did you forget to merge?"
1051 " use push -f to force)\n"))
1050 " use push -f to force)\n"))
1052 return 1
1051 return 1
1053
1052
1054 if revs is None:
1053 if revs is None:
1055 cg = self.changegroup(update, 'push')
1054 cg = self.changegroup(update, 'push')
1056 else:
1055 else:
1057 cg = self.changegroupsubset(update, revs, 'push')
1056 cg = self.changegroupsubset(update, revs, 'push')
1058 return remote.addchangegroup(cg)
1057 return remote.addchangegroup(cg)
1059
1058
1060 def changegroupsubset(self, bases, heads, source):
1059 def changegroupsubset(self, bases, heads, source):
1061 """This function generates a changegroup consisting of all the nodes
1060 """This function generates a changegroup consisting of all the nodes
1062 that are descendents of any of the bases, and ancestors of any of
1061 that are descendents of any of the bases, and ancestors of any of
1063 the heads.
1062 the heads.
1064
1063
1065 It is fairly complex as determining which filenodes and which
1064 It is fairly complex as determining which filenodes and which
1066 manifest nodes need to be included for the changeset to be complete
1065 manifest nodes need to be included for the changeset to be complete
1067 is non-trivial.
1066 is non-trivial.
1068
1067
1069 Another wrinkle is doing the reverse, figuring out which changeset in
1068 Another wrinkle is doing the reverse, figuring out which changeset in
1070 the changegroup a particular filenode or manifestnode belongs to."""
1069 the changegroup a particular filenode or manifestnode belongs to."""
1071
1070
1072 self.hook('preoutgoing', throw=True, source=source)
1071 self.hook('preoutgoing', throw=True, source=source)
1073
1072
1074 # Set up some initial variables
1073 # Set up some initial variables
1075 # Make it easy to refer to self.changelog
1074 # Make it easy to refer to self.changelog
1076 cl = self.changelog
1075 cl = self.changelog
1077 # msng is short for missing - compute the list of changesets in this
1076 # msng is short for missing - compute the list of changesets in this
1078 # changegroup.
1077 # changegroup.
1079 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1078 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1080 # Some bases may turn out to be superfluous, and some heads may be
1079 # Some bases may turn out to be superfluous, and some heads may be
1081 # too. nodesbetween will return the minimal set of bases and heads
1080 # too. nodesbetween will return the minimal set of bases and heads
1082 # necessary to re-create the changegroup.
1081 # necessary to re-create the changegroup.
1083
1082
1084 # Known heads are the list of heads that it is assumed the recipient
1083 # Known heads are the list of heads that it is assumed the recipient
1085 # of this changegroup will know about.
1084 # of this changegroup will know about.
1086 knownheads = {}
1085 knownheads = {}
1087 # We assume that all parents of bases are known heads.
1086 # We assume that all parents of bases are known heads.
1088 for n in bases:
1087 for n in bases:
1089 for p in cl.parents(n):
1088 for p in cl.parents(n):
1090 if p != nullid:
1089 if p != nullid:
1091 knownheads[p] = 1
1090 knownheads[p] = 1
1092 knownheads = knownheads.keys()
1091 knownheads = knownheads.keys()
1093 if knownheads:
1092 if knownheads:
1094 # Now that we know what heads are known, we can compute which
1093 # Now that we know what heads are known, we can compute which
1095 # changesets are known. The recipient must know about all
1094 # changesets are known. The recipient must know about all
1096 # changesets required to reach the known heads from the null
1095 # changesets required to reach the known heads from the null
1097 # changeset.
1096 # changeset.
1098 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1097 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1099 junk = None
1098 junk = None
1100 # Transform the list into an ersatz set.
1099 # Transform the list into an ersatz set.
1101 has_cl_set = dict.fromkeys(has_cl_set)
1100 has_cl_set = dict.fromkeys(has_cl_set)
1102 else:
1101 else:
1103 # If there were no known heads, the recipient cannot be assumed to
1102 # If there were no known heads, the recipient cannot be assumed to
1104 # know about any changesets.
1103 # know about any changesets.
1105 has_cl_set = {}
1104 has_cl_set = {}
1106
1105
1107 # Make it easy to refer to self.manifest
1106 # Make it easy to refer to self.manifest
1108 mnfst = self.manifest
1107 mnfst = self.manifest
1109 # We don't know which manifests are missing yet
1108 # We don't know which manifests are missing yet
1110 msng_mnfst_set = {}
1109 msng_mnfst_set = {}
1111 # Nor do we know which filenodes are missing.
1110 # Nor do we know which filenodes are missing.
1112 msng_filenode_set = {}
1111 msng_filenode_set = {}
1113
1112
1114 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1113 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1115 junk = None
1114 junk = None
1116
1115
1117 # A changeset always belongs to itself, so the changenode lookup
1116 # A changeset always belongs to itself, so the changenode lookup
1118 # function for a changenode is identity.
1117 # function for a changenode is identity.
1119 def identity(x):
1118 def identity(x):
1120 return x
1119 return x
1121
1120
1122 # A function generating function. Sets up an environment for the
1121 # A function generating function. Sets up an environment for the
1123 # inner function.
1122 # inner function.
1124 def cmp_by_rev_func(revlog):
1123 def cmp_by_rev_func(revlog):
1125 # Compare two nodes by their revision number in the environment's
1124 # Compare two nodes by their revision number in the environment's
1126 # revision history. Since the revision number both represents the
1125 # revision history. Since the revision number both represents the
1127 # most efficient order to read the nodes in, and represents a
1126 # most efficient order to read the nodes in, and represents a
1128 # topological sorting of the nodes, this function is often useful.
1127 # topological sorting of the nodes, this function is often useful.
1129 def cmp_by_rev(a, b):
1128 def cmp_by_rev(a, b):
1130 return cmp(revlog.rev(a), revlog.rev(b))
1129 return cmp(revlog.rev(a), revlog.rev(b))
1131 return cmp_by_rev
1130 return cmp_by_rev
1132
1131
1133 # If we determine that a particular file or manifest node must be a
1132 # If we determine that a particular file or manifest node must be a
1134 # node that the recipient of the changegroup will already have, we can
1133 # node that the recipient of the changegroup will already have, we can
1135 # also assume the recipient will have all the parents. This function
1134 # also assume the recipient will have all the parents. This function
1136 # prunes them from the set of missing nodes.
1135 # prunes them from the set of missing nodes.
1137 def prune_parents(revlog, hasset, msngset):
1136 def prune_parents(revlog, hasset, msngset):
1138 haslst = hasset.keys()
1137 haslst = hasset.keys()
1139 haslst.sort(cmp_by_rev_func(revlog))
1138 haslst.sort(cmp_by_rev_func(revlog))
1140 for node in haslst:
1139 for node in haslst:
1141 parentlst = [p for p in revlog.parents(node) if p != nullid]
1140 parentlst = [p for p in revlog.parents(node) if p != nullid]
1142 while parentlst:
1141 while parentlst:
1143 n = parentlst.pop()
1142 n = parentlst.pop()
1144 if n not in hasset:
1143 if n not in hasset:
1145 hasset[n] = 1
1144 hasset[n] = 1
1146 p = [p for p in revlog.parents(n) if p != nullid]
1145 p = [p for p in revlog.parents(n) if p != nullid]
1147 parentlst.extend(p)
1146 parentlst.extend(p)
1148 for n in hasset:
1147 for n in hasset:
1149 msngset.pop(n, None)
1148 msngset.pop(n, None)
1150
1149
1151 # This is a function generating function used to set up an environment
1150 # This is a function generating function used to set up an environment
1152 # for the inner function to execute in.
1151 # for the inner function to execute in.
1153 def manifest_and_file_collector(changedfileset):
1152 def manifest_and_file_collector(changedfileset):
1154 # This is an information gathering function that gathers
1153 # This is an information gathering function that gathers
1155 # information from each changeset node that goes out as part of
1154 # information from each changeset node that goes out as part of
1156 # the changegroup. The information gathered is a list of which
1155 # the changegroup. The information gathered is a list of which
1157 # manifest nodes are potentially required (the recipient may
1156 # manifest nodes are potentially required (the recipient may
1158 # already have them) and total list of all files which were
1157 # already have them) and total list of all files which were
1159 # changed in any changeset in the changegroup.
1158 # changed in any changeset in the changegroup.
1160 #
1159 #
1161 # We also remember the first changenode we saw any manifest
1160 # We also remember the first changenode we saw any manifest
1162 # referenced by so we can later determine which changenode 'owns'
1161 # referenced by so we can later determine which changenode 'owns'
1163 # the manifest.
1162 # the manifest.
1164 def collect_manifests_and_files(clnode):
1163 def collect_manifests_and_files(clnode):
1165 c = cl.read(clnode)
1164 c = cl.read(clnode)
1166 for f in c[3]:
1165 for f in c[3]:
1167 # This is to make sure we only have one instance of each
1166 # This is to make sure we only have one instance of each
1168 # filename string for each filename.
1167 # filename string for each filename.
1169 changedfileset.setdefault(f, f)
1168 changedfileset.setdefault(f, f)
1170 msng_mnfst_set.setdefault(c[0], clnode)
1169 msng_mnfst_set.setdefault(c[0], clnode)
1171 return collect_manifests_and_files
1170 return collect_manifests_and_files
1172
1171
1173 # Figure out which manifest nodes (of the ones we think might be part
1172 # Figure out which manifest nodes (of the ones we think might be part
1174 # of the changegroup) the recipient must know about and remove them
1173 # of the changegroup) the recipient must know about and remove them
1175 # from the changegroup.
1174 # from the changegroup.
1176 def prune_manifests():
1175 def prune_manifests():
1177 has_mnfst_set = {}
1176 has_mnfst_set = {}
1178 for n in msng_mnfst_set:
1177 for n in msng_mnfst_set:
1179 # If a 'missing' manifest thinks it belongs to a changenode
1178 # If a 'missing' manifest thinks it belongs to a changenode
1180 # the recipient is assumed to have, obviously the recipient
1179 # the recipient is assumed to have, obviously the recipient
1181 # must have that manifest.
1180 # must have that manifest.
1182 linknode = cl.node(mnfst.linkrev(n))
1181 linknode = cl.node(mnfst.linkrev(n))
1183 if linknode in has_cl_set:
1182 if linknode in has_cl_set:
1184 has_mnfst_set[n] = 1
1183 has_mnfst_set[n] = 1
1185 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1184 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1186
1185
1187 # Use the information collected in collect_manifests_and_files to say
1186 # Use the information collected in collect_manifests_and_files to say
1188 # which changenode any manifestnode belongs to.
1187 # which changenode any manifestnode belongs to.
1189 def lookup_manifest_link(mnfstnode):
1188 def lookup_manifest_link(mnfstnode):
1190 return msng_mnfst_set[mnfstnode]
1189 return msng_mnfst_set[mnfstnode]
1191
1190
1192 # A function generating function that sets up the initial environment
1191 # A function generating function that sets up the initial environment
1193 # the inner function.
1192 # the inner function.
1194 def filenode_collector(changedfiles):
1193 def filenode_collector(changedfiles):
1195 next_rev = [0]
1194 next_rev = [0]
1196 # This gathers information from each manifestnode included in the
1195 # This gathers information from each manifestnode included in the
1197 # changegroup about which filenodes the manifest node references
1196 # changegroup about which filenodes the manifest node references
1198 # so we can include those in the changegroup too.
1197 # so we can include those in the changegroup too.
1199 #
1198 #
1200 # It also remembers which changenode each filenode belongs to. It
1199 # It also remembers which changenode each filenode belongs to. It
1201 # does this by assuming the a filenode belongs to the changenode
1200 # does this by assuming the a filenode belongs to the changenode
1202 # the first manifest that references it belongs to.
1201 # the first manifest that references it belongs to.
1203 def collect_msng_filenodes(mnfstnode):
1202 def collect_msng_filenodes(mnfstnode):
1204 r = mnfst.rev(mnfstnode)
1203 r = mnfst.rev(mnfstnode)
1205 if r == next_rev[0]:
1204 if r == next_rev[0]:
1206 # If the last rev we looked at was the one just previous,
1205 # If the last rev we looked at was the one just previous,
1207 # we only need to see a diff.
1206 # we only need to see a diff.
1208 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1207 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1209 # For each line in the delta
1208 # For each line in the delta
1210 for dline in delta.splitlines():
1209 for dline in delta.splitlines():
1211 # get the filename and filenode for that line
1210 # get the filename and filenode for that line
1212 f, fnode = dline.split('\0')
1211 f, fnode = dline.split('\0')
1213 fnode = bin(fnode[:40])
1212 fnode = bin(fnode[:40])
1214 f = changedfiles.get(f, None)
1213 f = changedfiles.get(f, None)
1215 # And if the file is in the list of files we care
1214 # And if the file is in the list of files we care
1216 # about.
1215 # about.
1217 if f is not None:
1216 if f is not None:
1218 # Get the changenode this manifest belongs to
1217 # Get the changenode this manifest belongs to
1219 clnode = msng_mnfst_set[mnfstnode]
1218 clnode = msng_mnfst_set[mnfstnode]
1220 # Create the set of filenodes for the file if
1219 # Create the set of filenodes for the file if
1221 # there isn't one already.
1220 # there isn't one already.
1222 ndset = msng_filenode_set.setdefault(f, {})
1221 ndset = msng_filenode_set.setdefault(f, {})
1223 # And set the filenode's changelog node to the
1222 # And set the filenode's changelog node to the
1224 # manifest's if it hasn't been set already.
1223 # manifest's if it hasn't been set already.
1225 ndset.setdefault(fnode, clnode)
1224 ndset.setdefault(fnode, clnode)
1226 else:
1225 else:
1227 # Otherwise we need a full manifest.
1226 # Otherwise we need a full manifest.
1228 m = mnfst.read(mnfstnode)
1227 m = mnfst.read(mnfstnode)
1229 # For every file in we care about.
1228 # For every file in we care about.
1230 for f in changedfiles:
1229 for f in changedfiles:
1231 fnode = m.get(f, None)
1230 fnode = m.get(f, None)
1232 # If it's in the manifest
1231 # If it's in the manifest
1233 if fnode is not None:
1232 if fnode is not None:
1234 # See comments above.
1233 # See comments above.
1235 clnode = msng_mnfst_set[mnfstnode]
1234 clnode = msng_mnfst_set[mnfstnode]
1236 ndset = msng_filenode_set.setdefault(f, {})
1235 ndset = msng_filenode_set.setdefault(f, {})
1237 ndset.setdefault(fnode, clnode)
1236 ndset.setdefault(fnode, clnode)
1238 # Remember the revision we hope to see next.
1237 # Remember the revision we hope to see next.
1239 next_rev[0] = r + 1
1238 next_rev[0] = r + 1
1240 return collect_msng_filenodes
1239 return collect_msng_filenodes
1241
1240
1242 # We have a list of filenodes we think we need for a file, lets remove
1241 # We have a list of filenodes we think we need for a file, lets remove
1243 # all those we now the recipient must have.
1242 # all those we now the recipient must have.
1244 def prune_filenodes(f, filerevlog):
1243 def prune_filenodes(f, filerevlog):
1245 msngset = msng_filenode_set[f]
1244 msngset = msng_filenode_set[f]
1246 hasset = {}
1245 hasset = {}
1247 # If a 'missing' filenode thinks it belongs to a changenode we
1246 # If a 'missing' filenode thinks it belongs to a changenode we
1248 # assume the recipient must have, then the recipient must have
1247 # assume the recipient must have, then the recipient must have
1249 # that filenode.
1248 # that filenode.
1250 for n in msngset:
1249 for n in msngset:
1251 clnode = cl.node(filerevlog.linkrev(n))
1250 clnode = cl.node(filerevlog.linkrev(n))
1252 if clnode in has_cl_set:
1251 if clnode in has_cl_set:
1253 hasset[n] = 1
1252 hasset[n] = 1
1254 prune_parents(filerevlog, hasset, msngset)
1253 prune_parents(filerevlog, hasset, msngset)
1255
1254
1256 # A function generator function that sets up the a context for the
1255 # A function generator function that sets up the a context for the
1257 # inner function.
1256 # inner function.
1258 def lookup_filenode_link_func(fname):
1257 def lookup_filenode_link_func(fname):
1259 msngset = msng_filenode_set[fname]
1258 msngset = msng_filenode_set[fname]
1260 # Lookup the changenode the filenode belongs to.
1259 # Lookup the changenode the filenode belongs to.
1261 def lookup_filenode_link(fnode):
1260 def lookup_filenode_link(fnode):
1262 return msngset[fnode]
1261 return msngset[fnode]
1263 return lookup_filenode_link
1262 return lookup_filenode_link
1264
1263
1265 # Now that we have all theses utility functions to help out and
1264 # Now that we have all theses utility functions to help out and
1266 # logically divide up the task, generate the group.
1265 # logically divide up the task, generate the group.
1267 def gengroup():
1266 def gengroup():
1268 # The set of changed files starts empty.
1267 # The set of changed files starts empty.
1269 changedfiles = {}
1268 changedfiles = {}
1270 # Create a changenode group generator that will call our functions
1269 # Create a changenode group generator that will call our functions
1271 # back to lookup the owning changenode and collect information.
1270 # back to lookup the owning changenode and collect information.
1272 group = cl.group(msng_cl_lst, identity,
1271 group = cl.group(msng_cl_lst, identity,
1273 manifest_and_file_collector(changedfiles))
1272 manifest_and_file_collector(changedfiles))
1274 for chnk in group:
1273 for chnk in group:
1275 yield chnk
1274 yield chnk
1276
1275
1277 # The list of manifests has been collected by the generator
1276 # The list of manifests has been collected by the generator
1278 # calling our functions back.
1277 # calling our functions back.
1279 prune_manifests()
1278 prune_manifests()
1280 msng_mnfst_lst = msng_mnfst_set.keys()
1279 msng_mnfst_lst = msng_mnfst_set.keys()
1281 # Sort the manifestnodes by revision number.
1280 # Sort the manifestnodes by revision number.
1282 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1281 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1283 # Create a generator for the manifestnodes that calls our lookup
1282 # Create a generator for the manifestnodes that calls our lookup
1284 # and data collection functions back.
1283 # and data collection functions back.
1285 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1284 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1286 filenode_collector(changedfiles))
1285 filenode_collector(changedfiles))
1287 for chnk in group:
1286 for chnk in group:
1288 yield chnk
1287 yield chnk
1289
1288
1290 # These are no longer needed, dereference and toss the memory for
1289 # These are no longer needed, dereference and toss the memory for
1291 # them.
1290 # them.
1292 msng_mnfst_lst = None
1291 msng_mnfst_lst = None
1293 msng_mnfst_set.clear()
1292 msng_mnfst_set.clear()
1294
1293
1295 changedfiles = changedfiles.keys()
1294 changedfiles = changedfiles.keys()
1296 changedfiles.sort()
1295 changedfiles.sort()
1297 # Go through all our files in order sorted by name.
1296 # Go through all our files in order sorted by name.
1298 for fname in changedfiles:
1297 for fname in changedfiles:
1299 filerevlog = self.file(fname)
1298 filerevlog = self.file(fname)
1300 # Toss out the filenodes that the recipient isn't really
1299 # Toss out the filenodes that the recipient isn't really
1301 # missing.
1300 # missing.
1302 if msng_filenode_set.has_key(fname):
1301 if msng_filenode_set.has_key(fname):
1303 prune_filenodes(fname, filerevlog)
1302 prune_filenodes(fname, filerevlog)
1304 msng_filenode_lst = msng_filenode_set[fname].keys()
1303 msng_filenode_lst = msng_filenode_set[fname].keys()
1305 else:
1304 else:
1306 msng_filenode_lst = []
1305 msng_filenode_lst = []
1307 # If any filenodes are left, generate the group for them,
1306 # If any filenodes are left, generate the group for them,
1308 # otherwise don't bother.
1307 # otherwise don't bother.
1309 if len(msng_filenode_lst) > 0:
1308 if len(msng_filenode_lst) > 0:
1310 yield changegroup.genchunk(fname)
1309 yield changegroup.genchunk(fname)
1311 # Sort the filenodes by their revision #
1310 # Sort the filenodes by their revision #
1312 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1311 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1313 # Create a group generator and only pass in a changenode
1312 # Create a group generator and only pass in a changenode
1314 # lookup function as we need to collect no information
1313 # lookup function as we need to collect no information
1315 # from filenodes.
1314 # from filenodes.
1316 group = filerevlog.group(msng_filenode_lst,
1315 group = filerevlog.group(msng_filenode_lst,
1317 lookup_filenode_link_func(fname))
1316 lookup_filenode_link_func(fname))
1318 for chnk in group:
1317 for chnk in group:
1319 yield chnk
1318 yield chnk
1320 if msng_filenode_set.has_key(fname):
1319 if msng_filenode_set.has_key(fname):
1321 # Don't need this anymore, toss it to free memory.
1320 # Don't need this anymore, toss it to free memory.
1322 del msng_filenode_set[fname]
1321 del msng_filenode_set[fname]
1323 # Signal that no more groups are left.
1322 # Signal that no more groups are left.
1324 yield changegroup.closechunk()
1323 yield changegroup.closechunk()
1325
1324
1326 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1325 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1327
1326
1328 return util.chunkbuffer(gengroup())
1327 return util.chunkbuffer(gengroup())
1329
1328
1330 def changegroup(self, basenodes, source):
1329 def changegroup(self, basenodes, source):
1331 """Generate a changegroup of all nodes that we have that a recipient
1330 """Generate a changegroup of all nodes that we have that a recipient
1332 doesn't.
1331 doesn't.
1333
1332
1334 This is much easier than the previous function as we can assume that
1333 This is much easier than the previous function as we can assume that
1335 the recipient has any changenode we aren't sending them."""
1334 the recipient has any changenode we aren't sending them."""
1336
1335
1337 self.hook('preoutgoing', throw=True, source=source)
1336 self.hook('preoutgoing', throw=True, source=source)
1338
1337
1339 cl = self.changelog
1338 cl = self.changelog
1340 nodes = cl.nodesbetween(basenodes, None)[0]
1339 nodes = cl.nodesbetween(basenodes, None)[0]
1341 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1340 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1342
1341
1343 def identity(x):
1342 def identity(x):
1344 return x
1343 return x
1345
1344
1346 def gennodelst(revlog):
1345 def gennodelst(revlog):
1347 for r in xrange(0, revlog.count()):
1346 for r in xrange(0, revlog.count()):
1348 n = revlog.node(r)
1347 n = revlog.node(r)
1349 if revlog.linkrev(n) in revset:
1348 if revlog.linkrev(n) in revset:
1350 yield n
1349 yield n
1351
1350
1352 def changed_file_collector(changedfileset):
1351 def changed_file_collector(changedfileset):
1353 def collect_changed_files(clnode):
1352 def collect_changed_files(clnode):
1354 c = cl.read(clnode)
1353 c = cl.read(clnode)
1355 for fname in c[3]:
1354 for fname in c[3]:
1356 changedfileset[fname] = 1
1355 changedfileset[fname] = 1
1357 return collect_changed_files
1356 return collect_changed_files
1358
1357
1359 def lookuprevlink_func(revlog):
1358 def lookuprevlink_func(revlog):
1360 def lookuprevlink(n):
1359 def lookuprevlink(n):
1361 return cl.node(revlog.linkrev(n))
1360 return cl.node(revlog.linkrev(n))
1362 return lookuprevlink
1361 return lookuprevlink
1363
1362
1364 def gengroup():
1363 def gengroup():
1365 # construct a list of all changed files
1364 # construct a list of all changed files
1366 changedfiles = {}
1365 changedfiles = {}
1367
1366
1368 for chnk in cl.group(nodes, identity,
1367 for chnk in cl.group(nodes, identity,
1369 changed_file_collector(changedfiles)):
1368 changed_file_collector(changedfiles)):
1370 yield chnk
1369 yield chnk
1371 changedfiles = changedfiles.keys()
1370 changedfiles = changedfiles.keys()
1372 changedfiles.sort()
1371 changedfiles.sort()
1373
1372
1374 mnfst = self.manifest
1373 mnfst = self.manifest
1375 nodeiter = gennodelst(mnfst)
1374 nodeiter = gennodelst(mnfst)
1376 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1375 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1377 yield chnk
1376 yield chnk
1378
1377
1379 for fname in changedfiles:
1378 for fname in changedfiles:
1380 filerevlog = self.file(fname)
1379 filerevlog = self.file(fname)
1381 nodeiter = gennodelst(filerevlog)
1380 nodeiter = gennodelst(filerevlog)
1382 nodeiter = list(nodeiter)
1381 nodeiter = list(nodeiter)
1383 if nodeiter:
1382 if nodeiter:
1384 yield changegroup.genchunk(fname)
1383 yield changegroup.genchunk(fname)
1385 lookup = lookuprevlink_func(filerevlog)
1384 lookup = lookuprevlink_func(filerevlog)
1386 for chnk in filerevlog.group(nodeiter, lookup):
1385 for chnk in filerevlog.group(nodeiter, lookup):
1387 yield chnk
1386 yield chnk
1388
1387
1389 yield changegroup.closechunk()
1388 yield changegroup.closechunk()
1390 self.hook('outgoing', node=hex(nodes[0]), source=source)
1389 self.hook('outgoing', node=hex(nodes[0]), source=source)
1391
1390
1392 return util.chunkbuffer(gengroup())
1391 return util.chunkbuffer(gengroup())
1393
1392
1394 def addchangegroup(self, source):
1393 def addchangegroup(self, source):
1395 """add changegroup to repo.
1394 """add changegroup to repo.
1396 returns number of heads modified or added + 1."""
1395 returns number of heads modified or added + 1."""
1397
1396
1398 def csmap(x):
1397 def csmap(x):
1399 self.ui.debug(_("add changeset %s\n") % short(x))
1398 self.ui.debug(_("add changeset %s\n") % short(x))
1400 return cl.count()
1399 return cl.count()
1401
1400
1402 def revmap(x):
1401 def revmap(x):
1403 return cl.rev(x)
1402 return cl.rev(x)
1404
1403
1405 if not source:
1404 if not source:
1406 return 0
1405 return 0
1407
1406
1408 self.hook('prechangegroup', throw=True)
1407 self.hook('prechangegroup', throw=True)
1409
1408
1410 changesets = files = revisions = 0
1409 changesets = files = revisions = 0
1411
1410
1412 tr = self.transaction()
1411 tr = self.transaction()
1413
1412
1414 # write changelog and manifest data to temp files so
1413 # write changelog and manifest data to temp files so
1415 # concurrent readers will not see inconsistent view
1414 # concurrent readers will not see inconsistent view
1416 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1415 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1417
1416
1418 oldheads = len(cl.heads())
1417 oldheads = len(cl.heads())
1419
1418
1420 # pull off the changeset group
1419 # pull off the changeset group
1421 self.ui.status(_("adding changesets\n"))
1420 self.ui.status(_("adding changesets\n"))
1422 co = cl.tip()
1421 co = cl.tip()
1423 chunkiter = changegroup.chunkiter(source)
1422 chunkiter = changegroup.chunkiter(source)
1424 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1423 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1425 cnr, cor = map(cl.rev, (cn, co))
1424 cnr, cor = map(cl.rev, (cn, co))
1426 if cn == nullid:
1425 if cn == nullid:
1427 cnr = cor
1426 cnr = cor
1428 changesets = cnr - cor
1427 changesets = cnr - cor
1429
1428
1430 mf = appendfile.appendmanifest(self.opener, self.manifest.version)
1429 mf = appendfile.appendmanifest(self.opener, self.manifest.version)
1431
1430
1432 # pull off the manifest group
1431 # pull off the manifest group
1433 self.ui.status(_("adding manifests\n"))
1432 self.ui.status(_("adding manifests\n"))
1434 mm = mf.tip()
1433 mm = mf.tip()
1435 chunkiter = changegroup.chunkiter(source)
1434 chunkiter = changegroup.chunkiter(source)
1436 mo = mf.addgroup(chunkiter, revmap, tr)
1435 mo = mf.addgroup(chunkiter, revmap, tr)
1437
1436
1438 # process the files
1437 # process the files
1439 self.ui.status(_("adding file changes\n"))
1438 self.ui.status(_("adding file changes\n"))
1440 while 1:
1439 while 1:
1441 f = changegroup.getchunk(source)
1440 f = changegroup.getchunk(source)
1442 if not f:
1441 if not f:
1443 break
1442 break
1444 self.ui.debug(_("adding %s revisions\n") % f)
1443 self.ui.debug(_("adding %s revisions\n") % f)
1445 fl = self.file(f)
1444 fl = self.file(f)
1446 o = fl.count()
1445 o = fl.count()
1447 chunkiter = changegroup.chunkiter(source)
1446 chunkiter = changegroup.chunkiter(source)
1448 n = fl.addgroup(chunkiter, revmap, tr)
1447 n = fl.addgroup(chunkiter, revmap, tr)
1449 revisions += fl.count() - o
1448 revisions += fl.count() - o
1450 files += 1
1449 files += 1
1451
1450
1452 # write order here is important so concurrent readers will see
1451 # write order here is important so concurrent readers will see
1453 # consistent view of repo
1452 # consistent view of repo
1454 mf.writedata()
1453 mf.writedata()
1455 cl.writedata()
1454 cl.writedata()
1456
1455
1457 # make changelog and manifest see real files again
1456 # make changelog and manifest see real files again
1458 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1457 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1459 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1458 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1460 self.changelog.checkinlinesize(tr)
1459 self.changelog.checkinlinesize(tr)
1461 self.manifest.checkinlinesize(tr)
1460 self.manifest.checkinlinesize(tr)
1462
1461
1463 newheads = len(self.changelog.heads())
1462 newheads = len(self.changelog.heads())
1464 heads = ""
1463 heads = ""
1465 if oldheads and newheads > oldheads:
1464 if oldheads and newheads > oldheads:
1466 heads = _(" (+%d heads)") % (newheads - oldheads)
1465 heads = _(" (+%d heads)") % (newheads - oldheads)
1467
1466
1468 self.ui.status(_("added %d changesets"
1467 self.ui.status(_("added %d changesets"
1469 " with %d changes to %d files%s\n")
1468 " with %d changes to %d files%s\n")
1470 % (changesets, revisions, files, heads))
1469 % (changesets, revisions, files, heads))
1471
1470
1472 self.hook('pretxnchangegroup', throw=True,
1471 self.hook('pretxnchangegroup', throw=True,
1473 node=hex(self.changelog.node(cor+1)))
1472 node=hex(self.changelog.node(cor+1)))
1474
1473
1475 tr.close()
1474 tr.close()
1476
1475
1477 if changesets > 0:
1476 if changesets > 0:
1478 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1477 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1479
1478
1480 for i in range(cor + 1, cnr + 1):
1479 for i in range(cor + 1, cnr + 1):
1481 self.hook("incoming", node=hex(self.changelog.node(i)))
1480 self.hook("incoming", node=hex(self.changelog.node(i)))
1482
1481
1483 return newheads - oldheads + 1
1482 return newheads - oldheads + 1
1484
1483
1485 def update(self, node, allow=False, force=False, choose=None,
1484 def update(self, node, allow=False, force=False, choose=None,
1486 moddirstate=True, forcemerge=False, wlock=None):
1485 moddirstate=True, forcemerge=False, wlock=None):
1487 pl = self.dirstate.parents()
1486 pl = self.dirstate.parents()
1488 if not force and pl[1] != nullid:
1487 if not force and pl[1] != nullid:
1489 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1488 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1490 return 1
1489 return 1
1491
1490
1492 err = False
1491 err = False
1493
1492
1494 p1, p2 = pl[0], node
1493 p1, p2 = pl[0], node
1495 pa = self.changelog.ancestor(p1, p2)
1494 pa = self.changelog.ancestor(p1, p2)
1496 m1n = self.changelog.read(p1)[0]
1495 m1n = self.changelog.read(p1)[0]
1497 m2n = self.changelog.read(p2)[0]
1496 m2n = self.changelog.read(p2)[0]
1498 man = self.manifest.ancestor(m1n, m2n)
1497 man = self.manifest.ancestor(m1n, m2n)
1499 m1 = self.manifest.read(m1n)
1498 m1 = self.manifest.read(m1n)
1500 mf1 = self.manifest.readflags(m1n)
1499 mf1 = self.manifest.readflags(m1n)
1501 m2 = self.manifest.read(m2n).copy()
1500 m2 = self.manifest.read(m2n).copy()
1502 mf2 = self.manifest.readflags(m2n)
1501 mf2 = self.manifest.readflags(m2n)
1503 ma = self.manifest.read(man)
1502 ma = self.manifest.read(man)
1504 mfa = self.manifest.readflags(man)
1503 mfa = self.manifest.readflags(man)
1505
1504
1506 modified, added, removed, deleted, unknown = self.changes()
1505 modified, added, removed, deleted, unknown = self.changes()
1507
1506
1508 # is this a jump, or a merge? i.e. is there a linear path
1507 # is this a jump, or a merge? i.e. is there a linear path
1509 # from p1 to p2?
1508 # from p1 to p2?
1510 linear_path = (pa == p1 or pa == p2)
1509 linear_path = (pa == p1 or pa == p2)
1511
1510
1512 if allow and linear_path:
1511 if allow and linear_path:
1513 raise util.Abort(_("there is nothing to merge, "
1512 raise util.Abort(_("there is nothing to merge, "
1514 "just use 'hg update'"))
1513 "just use 'hg update'"))
1515 if allow and not forcemerge:
1514 if allow and not forcemerge:
1516 if modified or added or removed:
1515 if modified or added or removed:
1517 raise util.Abort(_("outstanding uncommitted changes"))
1516 raise util.Abort(_("outstanding uncommitted changes"))
1518 if not forcemerge and not force:
1517 if not forcemerge and not force:
1519 for f in unknown:
1518 for f in unknown:
1520 if f in m2:
1519 if f in m2:
1521 t1 = self.wread(f)
1520 t1 = self.wread(f)
1522 t2 = self.file(f).read(m2[f])
1521 t2 = self.file(f).read(m2[f])
1523 if cmp(t1, t2) != 0:
1522 if cmp(t1, t2) != 0:
1524 raise util.Abort(_("'%s' already exists in the working"
1523 raise util.Abort(_("'%s' already exists in the working"
1525 " dir and differs from remote") % f)
1524 " dir and differs from remote") % f)
1526
1525
1527 # resolve the manifest to determine which files
1526 # resolve the manifest to determine which files
1528 # we care about merging
1527 # we care about merging
1529 self.ui.note(_("resolving manifests\n"))
1528 self.ui.note(_("resolving manifests\n"))
1530 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1529 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1531 (force, allow, moddirstate, linear_path))
1530 (force, allow, moddirstate, linear_path))
1532 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1531 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1533 (short(man), short(m1n), short(m2n)))
1532 (short(man), short(m1n), short(m2n)))
1534
1533
1535 merge = {}
1534 merge = {}
1536 get = {}
1535 get = {}
1537 remove = []
1536 remove = []
1538
1537
1539 # construct a working dir manifest
1538 # construct a working dir manifest
1540 mw = m1.copy()
1539 mw = m1.copy()
1541 mfw = mf1.copy()
1540 mfw = mf1.copy()
1542 umap = dict.fromkeys(unknown)
1541 umap = dict.fromkeys(unknown)
1543
1542
1544 for f in added + modified + unknown:
1543 for f in added + modified + unknown:
1545 mw[f] = ""
1544 mw[f] = ""
1546 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1545 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1547
1546
1548 if moddirstate and not wlock:
1547 if moddirstate and not wlock:
1549 wlock = self.wlock()
1548 wlock = self.wlock()
1550
1549
1551 for f in deleted + removed:
1550 for f in deleted + removed:
1552 if f in mw:
1551 if f in mw:
1553 del mw[f]
1552 del mw[f]
1554
1553
1555 # If we're jumping between revisions (as opposed to merging),
1554 # If we're jumping between revisions (as opposed to merging),
1556 # and if neither the working directory nor the target rev has
1555 # and if neither the working directory nor the target rev has
1557 # the file, then we need to remove it from the dirstate, to
1556 # the file, then we need to remove it from the dirstate, to
1558 # prevent the dirstate from listing the file when it is no
1557 # prevent the dirstate from listing the file when it is no
1559 # longer in the manifest.
1558 # longer in the manifest.
1560 if moddirstate and linear_path and f not in m2:
1559 if moddirstate and linear_path and f not in m2:
1561 self.dirstate.forget((f,))
1560 self.dirstate.forget((f,))
1562
1561
1563 # Compare manifests
1562 # Compare manifests
1564 for f, n in mw.iteritems():
1563 for f, n in mw.iteritems():
1565 if choose and not choose(f):
1564 if choose and not choose(f):
1566 continue
1565 continue
1567 if f in m2:
1566 if f in m2:
1568 s = 0
1567 s = 0
1569
1568
1570 # is the wfile new since m1, and match m2?
1569 # is the wfile new since m1, and match m2?
1571 if f not in m1:
1570 if f not in m1:
1572 t1 = self.wread(f)
1571 t1 = self.wread(f)
1573 t2 = self.file(f).read(m2[f])
1572 t2 = self.file(f).read(m2[f])
1574 if cmp(t1, t2) == 0:
1573 if cmp(t1, t2) == 0:
1575 n = m2[f]
1574 n = m2[f]
1576 del t1, t2
1575 del t1, t2
1577
1576
1578 # are files different?
1577 # are files different?
1579 if n != m2[f]:
1578 if n != m2[f]:
1580 a = ma.get(f, nullid)
1579 a = ma.get(f, nullid)
1581 # are both different from the ancestor?
1580 # are both different from the ancestor?
1582 if n != a and m2[f] != a:
1581 if n != a and m2[f] != a:
1583 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1582 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1584 # merge executable bits
1583 # merge executable bits
1585 # "if we changed or they changed, change in merge"
1584 # "if we changed or they changed, change in merge"
1586 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1585 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1587 mode = ((a^b) | (a^c)) ^ a
1586 mode = ((a^b) | (a^c)) ^ a
1588 merge[f] = (m1.get(f, nullid), m2[f], mode)
1587 merge[f] = (m1.get(f, nullid), m2[f], mode)
1589 s = 1
1588 s = 1
1590 # are we clobbering?
1589 # are we clobbering?
1591 # is remote's version newer?
1590 # is remote's version newer?
1592 # or are we going back in time?
1591 # or are we going back in time?
1593 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1592 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1594 self.ui.debug(_(" remote %s is newer, get\n") % f)
1593 self.ui.debug(_(" remote %s is newer, get\n") % f)
1595 get[f] = m2[f]
1594 get[f] = m2[f]
1596 s = 1
1595 s = 1
1597 elif f in umap or f in added:
1596 elif f in umap or f in added:
1598 # this unknown file is the same as the checkout
1597 # this unknown file is the same as the checkout
1599 # we need to reset the dirstate if the file was added
1598 # we need to reset the dirstate if the file was added
1600 get[f] = m2[f]
1599 get[f] = m2[f]
1601
1600
1602 if not s and mfw[f] != mf2[f]:
1601 if not s and mfw[f] != mf2[f]:
1603 if force:
1602 if force:
1604 self.ui.debug(_(" updating permissions for %s\n") % f)
1603 self.ui.debug(_(" updating permissions for %s\n") % f)
1605 util.set_exec(self.wjoin(f), mf2[f])
1604 util.set_exec(self.wjoin(f), mf2[f])
1606 else:
1605 else:
1607 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1606 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1608 mode = ((a^b) | (a^c)) ^ a
1607 mode = ((a^b) | (a^c)) ^ a
1609 if mode != b:
1608 if mode != b:
1610 self.ui.debug(_(" updating permissions for %s\n")
1609 self.ui.debug(_(" updating permissions for %s\n")
1611 % f)
1610 % f)
1612 util.set_exec(self.wjoin(f), mode)
1611 util.set_exec(self.wjoin(f), mode)
1613 del m2[f]
1612 del m2[f]
1614 elif f in ma:
1613 elif f in ma:
1615 if n != ma[f]:
1614 if n != ma[f]:
1616 r = _("d")
1615 r = _("d")
1617 if not force and (linear_path or allow):
1616 if not force and (linear_path or allow):
1618 r = self.ui.prompt(
1617 r = self.ui.prompt(
1619 (_(" local changed %s which remote deleted\n") % f) +
1618 (_(" local changed %s which remote deleted\n") % f) +
1620 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1619 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1621 if r == _("d"):
1620 if r == _("d"):
1622 remove.append(f)
1621 remove.append(f)
1623 else:
1622 else:
1624 self.ui.debug(_("other deleted %s\n") % f)
1623 self.ui.debug(_("other deleted %s\n") % f)
1625 remove.append(f) # other deleted it
1624 remove.append(f) # other deleted it
1626 else:
1625 else:
1627 # file is created on branch or in working directory
1626 # file is created on branch or in working directory
1628 if force and f not in umap:
1627 if force and f not in umap:
1629 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1628 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1630 remove.append(f)
1629 remove.append(f)
1631 elif n == m1.get(f, nullid): # same as parent
1630 elif n == m1.get(f, nullid): # same as parent
1632 if p2 == pa: # going backwards?
1631 if p2 == pa: # going backwards?
1633 self.ui.debug(_("remote deleted %s\n") % f)
1632 self.ui.debug(_("remote deleted %s\n") % f)
1634 remove.append(f)
1633 remove.append(f)
1635 else:
1634 else:
1636 self.ui.debug(_("local modified %s, keeping\n") % f)
1635 self.ui.debug(_("local modified %s, keeping\n") % f)
1637 else:
1636 else:
1638 self.ui.debug(_("working dir created %s, keeping\n") % f)
1637 self.ui.debug(_("working dir created %s, keeping\n") % f)
1639
1638
1640 for f, n in m2.iteritems():
1639 for f, n in m2.iteritems():
1641 if choose and not choose(f):
1640 if choose and not choose(f):
1642 continue
1641 continue
1643 if f[0] == "/":
1642 if f[0] == "/":
1644 continue
1643 continue
1645 if f in ma and n != ma[f]:
1644 if f in ma and n != ma[f]:
1646 r = _("k")
1645 r = _("k")
1647 if not force and (linear_path or allow):
1646 if not force and (linear_path or allow):
1648 r = self.ui.prompt(
1647 r = self.ui.prompt(
1649 (_("remote changed %s which local deleted\n") % f) +
1648 (_("remote changed %s which local deleted\n") % f) +
1650 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1649 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1651 if r == _("k"):
1650 if r == _("k"):
1652 get[f] = n
1651 get[f] = n
1653 elif f not in ma:
1652 elif f not in ma:
1654 self.ui.debug(_("remote created %s\n") % f)
1653 self.ui.debug(_("remote created %s\n") % f)
1655 get[f] = n
1654 get[f] = n
1656 else:
1655 else:
1657 if force or p2 == pa: # going backwards?
1656 if force or p2 == pa: # going backwards?
1658 self.ui.debug(_("local deleted %s, recreating\n") % f)
1657 self.ui.debug(_("local deleted %s, recreating\n") % f)
1659 get[f] = n
1658 get[f] = n
1660 else:
1659 else:
1661 self.ui.debug(_("local deleted %s\n") % f)
1660 self.ui.debug(_("local deleted %s\n") % f)
1662
1661
1663 del mw, m1, m2, ma
1662 del mw, m1, m2, ma
1664
1663
1665 if force:
1664 if force:
1666 for f in merge:
1665 for f in merge:
1667 get[f] = merge[f][1]
1666 get[f] = merge[f][1]
1668 merge = {}
1667 merge = {}
1669
1668
1670 if linear_path or force:
1669 if linear_path or force:
1671 # we don't need to do any magic, just jump to the new rev
1670 # we don't need to do any magic, just jump to the new rev
1672 branch_merge = False
1671 branch_merge = False
1673 p1, p2 = p2, nullid
1672 p1, p2 = p2, nullid
1674 else:
1673 else:
1675 if not allow:
1674 if not allow:
1676 self.ui.status(_("this update spans a branch"
1675 self.ui.status(_("this update spans a branch"
1677 " affecting the following files:\n"))
1676 " affecting the following files:\n"))
1678 fl = merge.keys() + get.keys()
1677 fl = merge.keys() + get.keys()
1679 fl.sort()
1678 fl.sort()
1680 for f in fl:
1679 for f in fl:
1681 cf = ""
1680 cf = ""
1682 if f in merge:
1681 if f in merge:
1683 cf = _(" (resolve)")
1682 cf = _(" (resolve)")
1684 self.ui.status(" %s%s\n" % (f, cf))
1683 self.ui.status(" %s%s\n" % (f, cf))
1685 self.ui.warn(_("aborting update spanning branches!\n"))
1684 self.ui.warn(_("aborting update spanning branches!\n"))
1686 self.ui.status(_("(use 'hg merge' to merge across branches"
1685 self.ui.status(_("(use 'hg merge' to merge across branches"
1687 " or 'hg update -C' to lose changes)\n"))
1686 " or 'hg update -C' to lose changes)\n"))
1688 return 1
1687 return 1
1689 branch_merge = True
1688 branch_merge = True
1690
1689
1691 # get the files we don't need to change
1690 # get the files we don't need to change
1692 files = get.keys()
1691 files = get.keys()
1693 files.sort()
1692 files.sort()
1694 for f in files:
1693 for f in files:
1695 if f[0] == "/":
1694 if f[0] == "/":
1696 continue
1695 continue
1697 self.ui.note(_("getting %s\n") % f)
1696 self.ui.note(_("getting %s\n") % f)
1698 t = self.file(f).read(get[f])
1697 t = self.file(f).read(get[f])
1699 self.wwrite(f, t)
1698 self.wwrite(f, t)
1700 util.set_exec(self.wjoin(f), mf2[f])
1699 util.set_exec(self.wjoin(f), mf2[f])
1701 if moddirstate:
1700 if moddirstate:
1702 if branch_merge:
1701 if branch_merge:
1703 self.dirstate.update([f], 'n', st_mtime=-1)
1702 self.dirstate.update([f], 'n', st_mtime=-1)
1704 else:
1703 else:
1705 self.dirstate.update([f], 'n')
1704 self.dirstate.update([f], 'n')
1706
1705
1707 # merge the tricky bits
1706 # merge the tricky bits
1708 failedmerge = []
1707 failedmerge = []
1709 files = merge.keys()
1708 files = merge.keys()
1710 files.sort()
1709 files.sort()
1711 xp1 = hex(p1)
1710 xp1 = hex(p1)
1712 xp2 = hex(p2)
1711 xp2 = hex(p2)
1713 for f in files:
1712 for f in files:
1714 self.ui.status(_("merging %s\n") % f)
1713 self.ui.status(_("merging %s\n") % f)
1715 my, other, flag = merge[f]
1714 my, other, flag = merge[f]
1716 ret = self.merge3(f, my, other, xp1, xp2)
1715 ret = self.merge3(f, my, other, xp1, xp2)
1717 if ret:
1716 if ret:
1718 err = True
1717 err = True
1719 failedmerge.append(f)
1718 failedmerge.append(f)
1720 util.set_exec(self.wjoin(f), flag)
1719 util.set_exec(self.wjoin(f), flag)
1721 if moddirstate:
1720 if moddirstate:
1722 if branch_merge:
1721 if branch_merge:
1723 # We've done a branch merge, mark this file as merged
1722 # We've done a branch merge, mark this file as merged
1724 # so that we properly record the merger later
1723 # so that we properly record the merger later
1725 self.dirstate.update([f], 'm')
1724 self.dirstate.update([f], 'm')
1726 else:
1725 else:
1727 # We've update-merged a locally modified file, so
1726 # We've update-merged a locally modified file, so
1728 # we set the dirstate to emulate a normal checkout
1727 # we set the dirstate to emulate a normal checkout
1729 # of that file some time in the past. Thus our
1728 # of that file some time in the past. Thus our
1730 # merge will appear as a normal local file
1729 # merge will appear as a normal local file
1731 # modification.
1730 # modification.
1732 f_len = len(self.file(f).read(other))
1731 f_len = len(self.file(f).read(other))
1733 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1732 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1734
1733
1735 remove.sort()
1734 remove.sort()
1736 for f in remove:
1735 for f in remove:
1737 self.ui.note(_("removing %s\n") % f)
1736 self.ui.note(_("removing %s\n") % f)
1738 util.audit_path(f)
1737 util.audit_path(f)
1739 try:
1738 try:
1740 util.unlink(self.wjoin(f))
1739 util.unlink(self.wjoin(f))
1741 except OSError, inst:
1740 except OSError, inst:
1742 if inst.errno != errno.ENOENT:
1741 if inst.errno != errno.ENOENT:
1743 self.ui.warn(_("update failed to remove %s: %s!\n") %
1742 self.ui.warn(_("update failed to remove %s: %s!\n") %
1744 (f, inst.strerror))
1743 (f, inst.strerror))
1745 if moddirstate:
1744 if moddirstate:
1746 if branch_merge:
1745 if branch_merge:
1747 self.dirstate.update(remove, 'r')
1746 self.dirstate.update(remove, 'r')
1748 else:
1747 else:
1749 self.dirstate.forget(remove)
1748 self.dirstate.forget(remove)
1750
1749
1751 if moddirstate:
1750 if moddirstate:
1752 self.dirstate.setparents(p1, p2)
1751 self.dirstate.setparents(p1, p2)
1753
1752
1754 stat = ((len(get), _("updated")),
1753 stat = ((len(get), _("updated")),
1755 (len(merge) - len(failedmerge), _("merged")),
1754 (len(merge) - len(failedmerge), _("merged")),
1756 (len(remove), _("removed")),
1755 (len(remove), _("removed")),
1757 (len(failedmerge), _("unresolved")))
1756 (len(failedmerge), _("unresolved")))
1758 note = ", ".join([_("%d files %s") % s for s in stat])
1757 note = ", ".join([_("%d files %s") % s for s in stat])
1759 self.ui.note("%s\n" % note)
1758 self.ui.note("%s\n" % note)
1760 if moddirstate and branch_merge:
1759 if moddirstate and branch_merge:
1761 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1760 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1762
1761
1763 return err
1762 return err
1764
1763
1765 def merge3(self, fn, my, other, p1, p2):
1764 def merge3(self, fn, my, other, p1, p2):
1766 """perform a 3-way merge in the working directory"""
1765 """perform a 3-way merge in the working directory"""
1767
1766
1768 def temp(prefix, node):
1767 def temp(prefix, node):
1769 pre = "%s~%s." % (os.path.basename(fn), prefix)
1768 pre = "%s~%s." % (os.path.basename(fn), prefix)
1770 (fd, name) = tempfile.mkstemp("", pre)
1769 (fd, name) = tempfile.mkstemp("", pre)
1771 f = os.fdopen(fd, "wb")
1770 f = os.fdopen(fd, "wb")
1772 self.wwrite(fn, fl.read(node), f)
1771 self.wwrite(fn, fl.read(node), f)
1773 f.close()
1772 f.close()
1774 return name
1773 return name
1775
1774
1776 fl = self.file(fn)
1775 fl = self.file(fn)
1777 base = fl.ancestor(my, other)
1776 base = fl.ancestor(my, other)
1778 a = self.wjoin(fn)
1777 a = self.wjoin(fn)
1779 b = temp("base", base)
1778 b = temp("base", base)
1780 c = temp("other", other)
1779 c = temp("other", other)
1781
1780
1782 self.ui.note(_("resolving %s\n") % fn)
1781 self.ui.note(_("resolving %s\n") % fn)
1783 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1782 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1784 (fn, short(my), short(other), short(base)))
1783 (fn, short(my), short(other), short(base)))
1785
1784
1786 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1785 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1787 or "hgmerge")
1786 or "hgmerge")
1788 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1787 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1789 environ={'HG_FILE': fn,
1788 environ={'HG_FILE': fn,
1790 'HG_MY_NODE': p1,
1789 'HG_MY_NODE': p1,
1791 'HG_OTHER_NODE': p2,
1790 'HG_OTHER_NODE': p2,
1792 'HG_FILE_MY_NODE': hex(my),
1791 'HG_FILE_MY_NODE': hex(my),
1793 'HG_FILE_OTHER_NODE': hex(other),
1792 'HG_FILE_OTHER_NODE': hex(other),
1794 'HG_FILE_BASE_NODE': hex(base)})
1793 'HG_FILE_BASE_NODE': hex(base)})
1795 if r:
1794 if r:
1796 self.ui.warn(_("merging %s failed!\n") % fn)
1795 self.ui.warn(_("merging %s failed!\n") % fn)
1797
1796
1798 os.unlink(b)
1797 os.unlink(b)
1799 os.unlink(c)
1798 os.unlink(c)
1800 return r
1799 return r
1801
1800
1802 def verify(self):
1801 def verify(self):
1803 filelinkrevs = {}
1802 filelinkrevs = {}
1804 filenodes = {}
1803 filenodes = {}
1805 changesets = revisions = files = 0
1804 changesets = revisions = files = 0
1806 errors = [0]
1805 errors = [0]
1807 neededmanifests = {}
1806 neededmanifests = {}
1808
1807
1809 def err(msg):
1808 def err(msg):
1810 self.ui.warn(msg + "\n")
1809 self.ui.warn(msg + "\n")
1811 errors[0] += 1
1810 errors[0] += 1
1812
1811
1813 def checksize(obj, name):
1812 def checksize(obj, name):
1814 d = obj.checksize()
1813 d = obj.checksize()
1815 if d[0]:
1814 if d[0]:
1816 err(_("%s data length off by %d bytes") % (name, d[0]))
1815 err(_("%s data length off by %d bytes") % (name, d[0]))
1817 if d[1]:
1816 if d[1]:
1818 err(_("%s index contains %d extra bytes") % (name, d[1]))
1817 err(_("%s index contains %d extra bytes") % (name, d[1]))
1819
1818
1820 seen = {}
1819 seen = {}
1821 self.ui.status(_("checking changesets\n"))
1820 self.ui.status(_("checking changesets\n"))
1822 checksize(self.changelog, "changelog")
1821 checksize(self.changelog, "changelog")
1823
1822
1824 for i in range(self.changelog.count()):
1823 for i in range(self.changelog.count()):
1825 changesets += 1
1824 changesets += 1
1826 n = self.changelog.node(i)
1825 n = self.changelog.node(i)
1827 l = self.changelog.linkrev(n)
1826 l = self.changelog.linkrev(n)
1828 if l != i:
1827 if l != i:
1829 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1828 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1830 if n in seen:
1829 if n in seen:
1831 err(_("duplicate changeset at revision %d") % i)
1830 err(_("duplicate changeset at revision %d") % i)
1832 seen[n] = 1
1831 seen[n] = 1
1833
1832
1834 for p in self.changelog.parents(n):
1833 for p in self.changelog.parents(n):
1835 if p not in self.changelog.nodemap:
1834 if p not in self.changelog.nodemap:
1836 err(_("changeset %s has unknown parent %s") %
1835 err(_("changeset %s has unknown parent %s") %
1837 (short(n), short(p)))
1836 (short(n), short(p)))
1838 try:
1837 try:
1839 changes = self.changelog.read(n)
1838 changes = self.changelog.read(n)
1840 except KeyboardInterrupt:
1839 except KeyboardInterrupt:
1841 self.ui.warn(_("interrupted"))
1840 self.ui.warn(_("interrupted"))
1842 raise
1841 raise
1843 except Exception, inst:
1842 except Exception, inst:
1844 err(_("unpacking changeset %s: %s") % (short(n), inst))
1843 err(_("unpacking changeset %s: %s") % (short(n), inst))
1845 continue
1844 continue
1846
1845
1847 neededmanifests[changes[0]] = n
1846 neededmanifests[changes[0]] = n
1848
1847
1849 for f in changes[3]:
1848 for f in changes[3]:
1850 filelinkrevs.setdefault(f, []).append(i)
1849 filelinkrevs.setdefault(f, []).append(i)
1851
1850
1852 seen = {}
1851 seen = {}
1853 self.ui.status(_("checking manifests\n"))
1852 self.ui.status(_("checking manifests\n"))
1854 checksize(self.manifest, "manifest")
1853 checksize(self.manifest, "manifest")
1855
1854
1856 for i in range(self.manifest.count()):
1855 for i in range(self.manifest.count()):
1857 n = self.manifest.node(i)
1856 n = self.manifest.node(i)
1858 l = self.manifest.linkrev(n)
1857 l = self.manifest.linkrev(n)
1859
1858
1860 if l < 0 or l >= self.changelog.count():
1859 if l < 0 or l >= self.changelog.count():
1861 err(_("bad manifest link (%d) at revision %d") % (l, i))
1860 err(_("bad manifest link (%d) at revision %d") % (l, i))
1862
1861
1863 if n in neededmanifests:
1862 if n in neededmanifests:
1864 del neededmanifests[n]
1863 del neededmanifests[n]
1865
1864
1866 if n in seen:
1865 if n in seen:
1867 err(_("duplicate manifest at revision %d") % i)
1866 err(_("duplicate manifest at revision %d") % i)
1868
1867
1869 seen[n] = 1
1868 seen[n] = 1
1870
1869
1871 for p in self.manifest.parents(n):
1870 for p in self.manifest.parents(n):
1872 if p not in self.manifest.nodemap:
1871 if p not in self.manifest.nodemap:
1873 err(_("manifest %s has unknown parent %s") %
1872 err(_("manifest %s has unknown parent %s") %
1874 (short(n), short(p)))
1873 (short(n), short(p)))
1875
1874
1876 try:
1875 try:
1877 delta = mdiff.patchtext(self.manifest.delta(n))
1876 delta = mdiff.patchtext(self.manifest.delta(n))
1878 except KeyboardInterrupt:
1877 except KeyboardInterrupt:
1879 self.ui.warn(_("interrupted"))
1878 self.ui.warn(_("interrupted"))
1880 raise
1879 raise
1881 except Exception, inst:
1880 except Exception, inst:
1882 err(_("unpacking manifest %s: %s") % (short(n), inst))
1881 err(_("unpacking manifest %s: %s") % (short(n), inst))
1883 continue
1882 continue
1884
1883
1885 try:
1884 try:
1886 ff = [ l.split('\0') for l in delta.splitlines() ]
1885 ff = [ l.split('\0') for l in delta.splitlines() ]
1887 for f, fn in ff:
1886 for f, fn in ff:
1888 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1887 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1889 except (ValueError, TypeError), inst:
1888 except (ValueError, TypeError), inst:
1890 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1889 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1891
1890
1892 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1891 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1893
1892
1894 for m, c in neededmanifests.items():
1893 for m, c in neededmanifests.items():
1895 err(_("Changeset %s refers to unknown manifest %s") %
1894 err(_("Changeset %s refers to unknown manifest %s") %
1896 (short(m), short(c)))
1895 (short(m), short(c)))
1897 del neededmanifests
1896 del neededmanifests
1898
1897
1899 for f in filenodes:
1898 for f in filenodes:
1900 if f not in filelinkrevs:
1899 if f not in filelinkrevs:
1901 err(_("file %s in manifest but not in changesets") % f)
1900 err(_("file %s in manifest but not in changesets") % f)
1902
1901
1903 for f in filelinkrevs:
1902 for f in filelinkrevs:
1904 if f not in filenodes:
1903 if f not in filenodes:
1905 err(_("file %s in changeset but not in manifest") % f)
1904 err(_("file %s in changeset but not in manifest") % f)
1906
1905
1907 self.ui.status(_("checking files\n"))
1906 self.ui.status(_("checking files\n"))
1908 ff = filenodes.keys()
1907 ff = filenodes.keys()
1909 ff.sort()
1908 ff.sort()
1910 for f in ff:
1909 for f in ff:
1911 if f == "/dev/null":
1910 if f == "/dev/null":
1912 continue
1911 continue
1913 files += 1
1912 files += 1
1914 if not f:
1913 if not f:
1915 err(_("file without name in manifest %s") % short(n))
1914 err(_("file without name in manifest %s") % short(n))
1916 continue
1915 continue
1917 fl = self.file(f)
1916 fl = self.file(f)
1918 checksize(fl, f)
1917 checksize(fl, f)
1919
1918
1920 nodes = {nullid: 1}
1919 nodes = {nullid: 1}
1921 seen = {}
1920 seen = {}
1922 for i in range(fl.count()):
1921 for i in range(fl.count()):
1923 revisions += 1
1922 revisions += 1
1924 n = fl.node(i)
1923 n = fl.node(i)
1925
1924
1926 if n in seen:
1925 if n in seen:
1927 err(_("%s: duplicate revision %d") % (f, i))
1926 err(_("%s: duplicate revision %d") % (f, i))
1928 if n not in filenodes[f]:
1927 if n not in filenodes[f]:
1929 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1928 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1930 else:
1929 else:
1931 del filenodes[f][n]
1930 del filenodes[f][n]
1932
1931
1933 flr = fl.linkrev(n)
1932 flr = fl.linkrev(n)
1934 if flr not in filelinkrevs.get(f, []):
1933 if flr not in filelinkrevs.get(f, []):
1935 err(_("%s:%s points to unexpected changeset %d")
1934 err(_("%s:%s points to unexpected changeset %d")
1936 % (f, short(n), flr))
1935 % (f, short(n), flr))
1937 else:
1936 else:
1938 filelinkrevs[f].remove(flr)
1937 filelinkrevs[f].remove(flr)
1939
1938
1940 # verify contents
1939 # verify contents
1941 try:
1940 try:
1942 t = fl.read(n)
1941 t = fl.read(n)
1943 except KeyboardInterrupt:
1942 except KeyboardInterrupt:
1944 self.ui.warn(_("interrupted"))
1943 self.ui.warn(_("interrupted"))
1945 raise
1944 raise
1946 except Exception, inst:
1945 except Exception, inst:
1947 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1946 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1948
1947
1949 # verify parents
1948 # verify parents
1950 (p1, p2) = fl.parents(n)
1949 (p1, p2) = fl.parents(n)
1951 if p1 not in nodes:
1950 if p1 not in nodes:
1952 err(_("file %s:%s unknown parent 1 %s") %
1951 err(_("file %s:%s unknown parent 1 %s") %
1953 (f, short(n), short(p1)))
1952 (f, short(n), short(p1)))
1954 if p2 not in nodes:
1953 if p2 not in nodes:
1955 err(_("file %s:%s unknown parent 2 %s") %
1954 err(_("file %s:%s unknown parent 2 %s") %
1956 (f, short(n), short(p1)))
1955 (f, short(n), short(p1)))
1957 nodes[n] = 1
1956 nodes[n] = 1
1958
1957
1959 # cross-check
1958 # cross-check
1960 for node in filenodes[f]:
1959 for node in filenodes[f]:
1961 err(_("node %s in manifests not in %s") % (hex(node), f))
1960 err(_("node %s in manifests not in %s") % (hex(node), f))
1962
1961
1963 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1962 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1964 (files, changesets, revisions))
1963 (files, changesets, revisions))
1965
1964
1966 if errors[0]:
1965 if errors[0]:
1967 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1966 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1968 return 1
1967 return 1
1969
1968
1970 # used to avoid circular references so destructors work
1969 # used to avoid circular references so destructors work
1971 def aftertrans(base):
1970 def aftertrans(base):
1972 p = base
1971 p = base
1973 def a():
1972 def a():
1974 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1973 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1975 util.rename(os.path.join(p, "journal.dirstate"),
1974 util.rename(os.path.join(p, "journal.dirstate"),
1976 os.path.join(p, "undo.dirstate"))
1975 os.path.join(p, "undo.dirstate"))
1977 return a
1976 return a
1978
1977
General Comments 0
You need to be logged in to leave comments. Login now