##// END OF EJS Templates
localrepo: refactor the locking functions
Benoit Boissinot -
r1751:e9bf415a default
parent child Browse files
Show More
@@ -1,1853 +1,1853 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14
14
15 class localrepository(object):
15 class localrepository(object):
16 def __init__(self, ui, path=None, create=0):
16 def __init__(self, ui, path=None, create=0):
17 if not path:
17 if not path:
18 p = os.getcwd()
18 p = os.getcwd()
19 while not os.path.isdir(os.path.join(p, ".hg")):
19 while not os.path.isdir(os.path.join(p, ".hg")):
20 oldp = p
20 oldp = p
21 p = os.path.dirname(p)
21 p = os.path.dirname(p)
22 if p == oldp:
22 if p == oldp:
23 raise repo.RepoError(_("no repo found"))
23 raise repo.RepoError(_("no repo found"))
24 path = p
24 path = p
25 self.path = os.path.join(path, ".hg")
25 self.path = os.path.join(path, ".hg")
26
26
27 if not create and not os.path.isdir(self.path):
27 if not create and not os.path.isdir(self.path):
28 raise repo.RepoError(_("repository %s not found") % path)
28 raise repo.RepoError(_("repository %s not found") % path)
29
29
30 self.root = os.path.abspath(path)
30 self.root = os.path.abspath(path)
31 self.ui = ui
31 self.ui = ui
32 self.opener = util.opener(self.path)
32 self.opener = util.opener(self.path)
33 self.wopener = util.opener(self.root)
33 self.wopener = util.opener(self.root)
34 self.manifest = manifest.manifest(self.opener)
34 self.manifest = manifest.manifest(self.opener)
35 self.changelog = changelog.changelog(self.opener)
35 self.changelog = changelog.changelog(self.opener)
36 self.tagscache = None
36 self.tagscache = None
37 self.nodetagscache = None
37 self.nodetagscache = None
38 self.encodepats = None
38 self.encodepats = None
39 self.decodepats = None
39 self.decodepats = None
40
40
41 if create:
41 if create:
42 os.mkdir(self.path)
42 os.mkdir(self.path)
43 os.mkdir(self.join("data"))
43 os.mkdir(self.join("data"))
44
44
45 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
45 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
46 try:
46 try:
47 self.ui.readconfig(self.join("hgrc"))
47 self.ui.readconfig(self.join("hgrc"))
48 except IOError:
48 except IOError:
49 pass
49 pass
50
50
51 def hook(self, name, throw=False, **args):
51 def hook(self, name, throw=False, **args):
52 def runhook(name, cmd):
52 def runhook(name, cmd):
53 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
53 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
54 old = {}
54 old = {}
55 for k, v in args.items():
55 for k, v in args.items():
56 k = k.upper()
56 k = k.upper()
57 old['HG_' + k] = os.environ.get(k, None)
57 old['HG_' + k] = os.environ.get(k, None)
58 old[k] = os.environ.get(k, None)
58 old[k] = os.environ.get(k, None)
59 os.environ['HG_' + k] = str(v)
59 os.environ['HG_' + k] = str(v)
60 os.environ[k] = str(v)
60 os.environ[k] = str(v)
61
61
62 try:
62 try:
63 # Hooks run in the repository root
63 # Hooks run in the repository root
64 olddir = os.getcwd()
64 olddir = os.getcwd()
65 os.chdir(self.root)
65 os.chdir(self.root)
66 r = os.system(cmd)
66 r = os.system(cmd)
67 finally:
67 finally:
68 for k, v in old.items():
68 for k, v in old.items():
69 if v is not None:
69 if v is not None:
70 os.environ[k] = v
70 os.environ[k] = v
71 else:
71 else:
72 del os.environ[k]
72 del os.environ[k]
73
73
74 os.chdir(olddir)
74 os.chdir(olddir)
75
75
76 if r:
76 if r:
77 desc, r = util.explain_exit(r)
77 desc, r = util.explain_exit(r)
78 if throw:
78 if throw:
79 raise util.Abort(_('%s hook %s') % (name, desc))
79 raise util.Abort(_('%s hook %s') % (name, desc))
80 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
80 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
81 return False
81 return False
82 return True
82 return True
83
83
84 r = True
84 r = True
85 for hname, cmd in self.ui.configitems("hooks"):
85 for hname, cmd in self.ui.configitems("hooks"):
86 s = hname.split(".")
86 s = hname.split(".")
87 if s[0] == name and cmd:
87 if s[0] == name and cmd:
88 r = runhook(hname, cmd) and r
88 r = runhook(hname, cmd) and r
89 return r
89 return r
90
90
91 def tags(self):
91 def tags(self):
92 '''return a mapping of tag to node'''
92 '''return a mapping of tag to node'''
93 if not self.tagscache:
93 if not self.tagscache:
94 self.tagscache = {}
94 self.tagscache = {}
95 def addtag(self, k, n):
95 def addtag(self, k, n):
96 try:
96 try:
97 bin_n = bin(n)
97 bin_n = bin(n)
98 except TypeError:
98 except TypeError:
99 bin_n = ''
99 bin_n = ''
100 self.tagscache[k.strip()] = bin_n
100 self.tagscache[k.strip()] = bin_n
101
101
102 try:
102 try:
103 # read each head of the tags file, ending with the tip
103 # read each head of the tags file, ending with the tip
104 # and add each tag found to the map, with "newer" ones
104 # and add each tag found to the map, with "newer" ones
105 # taking precedence
105 # taking precedence
106 fl = self.file(".hgtags")
106 fl = self.file(".hgtags")
107 h = fl.heads()
107 h = fl.heads()
108 h.reverse()
108 h.reverse()
109 for r in h:
109 for r in h:
110 for l in fl.read(r).splitlines():
110 for l in fl.read(r).splitlines():
111 if l:
111 if l:
112 n, k = l.split(" ", 1)
112 n, k = l.split(" ", 1)
113 addtag(self, k, n)
113 addtag(self, k, n)
114 except KeyError:
114 except KeyError:
115 pass
115 pass
116
116
117 try:
117 try:
118 f = self.opener("localtags")
118 f = self.opener("localtags")
119 for l in f:
119 for l in f:
120 n, k = l.split(" ", 1)
120 n, k = l.split(" ", 1)
121 addtag(self, k, n)
121 addtag(self, k, n)
122 except IOError:
122 except IOError:
123 pass
123 pass
124
124
125 self.tagscache['tip'] = self.changelog.tip()
125 self.tagscache['tip'] = self.changelog.tip()
126
126
127 return self.tagscache
127 return self.tagscache
128
128
129 def tagslist(self):
129 def tagslist(self):
130 '''return a list of tags ordered by revision'''
130 '''return a list of tags ordered by revision'''
131 l = []
131 l = []
132 for t, n in self.tags().items():
132 for t, n in self.tags().items():
133 try:
133 try:
134 r = self.changelog.rev(n)
134 r = self.changelog.rev(n)
135 except:
135 except:
136 r = -2 # sort to the beginning of the list if unknown
136 r = -2 # sort to the beginning of the list if unknown
137 l.append((r, t, n))
137 l.append((r, t, n))
138 l.sort()
138 l.sort()
139 return [(t, n) for r, t, n in l]
139 return [(t, n) for r, t, n in l]
140
140
141 def nodetags(self, node):
141 def nodetags(self, node):
142 '''return the tags associated with a node'''
142 '''return the tags associated with a node'''
143 if not self.nodetagscache:
143 if not self.nodetagscache:
144 self.nodetagscache = {}
144 self.nodetagscache = {}
145 for t, n in self.tags().items():
145 for t, n in self.tags().items():
146 self.nodetagscache.setdefault(n, []).append(t)
146 self.nodetagscache.setdefault(n, []).append(t)
147 return self.nodetagscache.get(node, [])
147 return self.nodetagscache.get(node, [])
148
148
149 def lookup(self, key):
149 def lookup(self, key):
150 try:
150 try:
151 return self.tags()[key]
151 return self.tags()[key]
152 except KeyError:
152 except KeyError:
153 try:
153 try:
154 return self.changelog.lookup(key)
154 return self.changelog.lookup(key)
155 except:
155 except:
156 raise repo.RepoError(_("unknown revision '%s'") % key)
156 raise repo.RepoError(_("unknown revision '%s'") % key)
157
157
158 def dev(self):
158 def dev(self):
159 return os.stat(self.path).st_dev
159 return os.stat(self.path).st_dev
160
160
161 def local(self):
161 def local(self):
162 return True
162 return True
163
163
164 def join(self, f):
164 def join(self, f):
165 return os.path.join(self.path, f)
165 return os.path.join(self.path, f)
166
166
167 def wjoin(self, f):
167 def wjoin(self, f):
168 return os.path.join(self.root, f)
168 return os.path.join(self.root, f)
169
169
170 def file(self, f):
170 def file(self, f):
171 if f[0] == '/':
171 if f[0] == '/':
172 f = f[1:]
172 f = f[1:]
173 return filelog.filelog(self.opener, f)
173 return filelog.filelog(self.opener, f)
174
174
175 def getcwd(self):
175 def getcwd(self):
176 return self.dirstate.getcwd()
176 return self.dirstate.getcwd()
177
177
178 def wfile(self, f, mode='r'):
178 def wfile(self, f, mode='r'):
179 return self.wopener(f, mode)
179 return self.wopener(f, mode)
180
180
181 def wread(self, filename):
181 def wread(self, filename):
182 if self.encodepats == None:
182 if self.encodepats == None:
183 l = []
183 l = []
184 for pat, cmd in self.ui.configitems("encode"):
184 for pat, cmd in self.ui.configitems("encode"):
185 mf = util.matcher("", "/", [pat], [], [])[1]
185 mf = util.matcher("", "/", [pat], [], [])[1]
186 l.append((mf, cmd))
186 l.append((mf, cmd))
187 self.encodepats = l
187 self.encodepats = l
188
188
189 data = self.wopener(filename, 'r').read()
189 data = self.wopener(filename, 'r').read()
190
190
191 for mf, cmd in self.encodepats:
191 for mf, cmd in self.encodepats:
192 if mf(filename):
192 if mf(filename):
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 data = util.filter(data, cmd)
194 data = util.filter(data, cmd)
195 break
195 break
196
196
197 return data
197 return data
198
198
199 def wwrite(self, filename, data, fd=None):
199 def wwrite(self, filename, data, fd=None):
200 if self.decodepats == None:
200 if self.decodepats == None:
201 l = []
201 l = []
202 for pat, cmd in self.ui.configitems("decode"):
202 for pat, cmd in self.ui.configitems("decode"):
203 mf = util.matcher("", "/", [pat], [], [])[1]
203 mf = util.matcher("", "/", [pat], [], [])[1]
204 l.append((mf, cmd))
204 l.append((mf, cmd))
205 self.decodepats = l
205 self.decodepats = l
206
206
207 for mf, cmd in self.decodepats:
207 for mf, cmd in self.decodepats:
208 if mf(filename):
208 if mf(filename):
209 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
209 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
210 data = util.filter(data, cmd)
210 data = util.filter(data, cmd)
211 break
211 break
212
212
213 if fd:
213 if fd:
214 return fd.write(data)
214 return fd.write(data)
215 return self.wopener(filename, 'w').write(data)
215 return self.wopener(filename, 'w').write(data)
216
216
217 def transaction(self):
217 def transaction(self):
218 # save dirstate for undo
218 # save dirstate for undo
219 try:
219 try:
220 ds = self.opener("dirstate").read()
220 ds = self.opener("dirstate").read()
221 except IOError:
221 except IOError:
222 ds = ""
222 ds = ""
223 self.opener("journal.dirstate", "w").write(ds)
223 self.opener("journal.dirstate", "w").write(ds)
224
224
225 def after():
225 def after():
226 util.rename(self.join("journal"), self.join("undo"))
226 util.rename(self.join("journal"), self.join("undo"))
227 util.rename(self.join("journal.dirstate"),
227 util.rename(self.join("journal.dirstate"),
228 self.join("undo.dirstate"))
228 self.join("undo.dirstate"))
229
229
230 return transaction.transaction(self.ui.warn, self.opener,
230 return transaction.transaction(self.ui.warn, self.opener,
231 self.join("journal"), after)
231 self.join("journal"), after)
232
232
233 def recover(self):
233 def recover(self):
234 lock = self.lock()
234 lock = self.lock()
235 if os.path.exists(self.join("journal")):
235 if os.path.exists(self.join("journal")):
236 self.ui.status(_("rolling back interrupted transaction\n"))
236 self.ui.status(_("rolling back interrupted transaction\n"))
237 transaction.rollback(self.opener, self.join("journal"))
237 transaction.rollback(self.opener, self.join("journal"))
238 self.manifest = manifest.manifest(self.opener)
238 self.manifest = manifest.manifest(self.opener)
239 self.changelog = changelog.changelog(self.opener)
239 self.changelog = changelog.changelog(self.opener)
240 return True
240 return True
241 else:
241 else:
242 self.ui.warn(_("no interrupted transaction available\n"))
242 self.ui.warn(_("no interrupted transaction available\n"))
243 return False
243 return False
244
244
245 def undo(self, wlock=None):
245 def undo(self, wlock=None):
246 if not wlock:
246 if not wlock:
247 wlock = self.wlock()
247 wlock = self.wlock()
248 lock = self.lock()
248 lock = self.lock()
249 if os.path.exists(self.join("undo")):
249 if os.path.exists(self.join("undo")):
250 self.ui.status(_("rolling back last transaction\n"))
250 self.ui.status(_("rolling back last transaction\n"))
251 transaction.rollback(self.opener, self.join("undo"))
251 transaction.rollback(self.opener, self.join("undo"))
252 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
252 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
253 self.dirstate.read()
253 self.dirstate.read()
254 else:
254 else:
255 self.ui.warn(_("no undo information available\n"))
255 self.ui.warn(_("no undo information available\n"))
256
256
257 def lock(self, wait=1):
257 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
258 try:
258 try:
259 return lock.lock(self.join("lock"), 0)
259 l = lock.lock(self.join(lockname), 0, releasefn)
260 except lock.LockHeld, inst:
261 if wait:
262 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
263 return lock.lock(self.join("lock"), wait)
264 raise inst
265
266 def wlock(self, wait=1):
267 try:
268 wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
269 except lock.LockHeld, inst:
260 except lock.LockHeld, inst:
270 if not wait:
261 if not wait:
271 raise inst
262 raise inst
272 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
263 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
273 wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
264 l = lock.lock(self.join(lockname), wait, releasefn)
274 self.dirstate.read()
265 if acquirefn:
275 return wlock
266 acquirefn()
267 return l
268
269 def lock(self, wait=1):
270 return self.do_lock("lock", wait)
271
272 def wlock(self, wait=1):
273 return self.do_lock("wlock", wait,
274 self.dirstate.write,
275 self.dirstate.read)
276
276
277 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
277 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
278 "determine whether a new filenode is needed"
278 "determine whether a new filenode is needed"
279 fp1 = manifest1.get(filename, nullid)
279 fp1 = manifest1.get(filename, nullid)
280 fp2 = manifest2.get(filename, nullid)
280 fp2 = manifest2.get(filename, nullid)
281
281
282 if fp2 != nullid:
282 if fp2 != nullid:
283 # is one parent an ancestor of the other?
283 # is one parent an ancestor of the other?
284 fpa = filelog.ancestor(fp1, fp2)
284 fpa = filelog.ancestor(fp1, fp2)
285 if fpa == fp1:
285 if fpa == fp1:
286 fp1, fp2 = fp2, nullid
286 fp1, fp2 = fp2, nullid
287 elif fpa == fp2:
287 elif fpa == fp2:
288 fp2 = nullid
288 fp2 = nullid
289
289
290 # is the file unmodified from the parent? report existing entry
290 # is the file unmodified from the parent? report existing entry
291 if fp2 == nullid and text == filelog.read(fp1):
291 if fp2 == nullid and text == filelog.read(fp1):
292 return (fp1, None, None)
292 return (fp1, None, None)
293
293
294 return (None, fp1, fp2)
294 return (None, fp1, fp2)
295
295
296 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
296 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
297 orig_parent = self.dirstate.parents()[0] or nullid
297 orig_parent = self.dirstate.parents()[0] or nullid
298 p1 = p1 or self.dirstate.parents()[0] or nullid
298 p1 = p1 or self.dirstate.parents()[0] or nullid
299 p2 = p2 or self.dirstate.parents()[1] or nullid
299 p2 = p2 or self.dirstate.parents()[1] or nullid
300 c1 = self.changelog.read(p1)
300 c1 = self.changelog.read(p1)
301 c2 = self.changelog.read(p2)
301 c2 = self.changelog.read(p2)
302 m1 = self.manifest.read(c1[0])
302 m1 = self.manifest.read(c1[0])
303 mf1 = self.manifest.readflags(c1[0])
303 mf1 = self.manifest.readflags(c1[0])
304 m2 = self.manifest.read(c2[0])
304 m2 = self.manifest.read(c2[0])
305 changed = []
305 changed = []
306
306
307 if orig_parent == p1:
307 if orig_parent == p1:
308 update_dirstate = 1
308 update_dirstate = 1
309 else:
309 else:
310 update_dirstate = 0
310 update_dirstate = 0
311
311
312 if not wlock:
312 if not wlock:
313 wlock = self.wlock()
313 wlock = self.wlock()
314 lock = self.lock()
314 lock = self.lock()
315 tr = self.transaction()
315 tr = self.transaction()
316 mm = m1.copy()
316 mm = m1.copy()
317 mfm = mf1.copy()
317 mfm = mf1.copy()
318 linkrev = self.changelog.count()
318 linkrev = self.changelog.count()
319 for f in files:
319 for f in files:
320 try:
320 try:
321 t = self.wread(f)
321 t = self.wread(f)
322 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
322 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
323 r = self.file(f)
323 r = self.file(f)
324 mfm[f] = tm
324 mfm[f] = tm
325
325
326 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
326 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
327 if entry:
327 if entry:
328 mm[f] = entry
328 mm[f] = entry
329 continue
329 continue
330
330
331 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
331 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
332 changed.append(f)
332 changed.append(f)
333 if update_dirstate:
333 if update_dirstate:
334 self.dirstate.update([f], "n")
334 self.dirstate.update([f], "n")
335 except IOError:
335 except IOError:
336 try:
336 try:
337 del mm[f]
337 del mm[f]
338 del mfm[f]
338 del mfm[f]
339 if update_dirstate:
339 if update_dirstate:
340 self.dirstate.forget([f])
340 self.dirstate.forget([f])
341 except:
341 except:
342 # deleted from p2?
342 # deleted from p2?
343 pass
343 pass
344
344
345 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
345 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
346 user = user or self.ui.username()
346 user = user or self.ui.username()
347 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
347 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
348 tr.close()
348 tr.close()
349 if update_dirstate:
349 if update_dirstate:
350 self.dirstate.setparents(n, nullid)
350 self.dirstate.setparents(n, nullid)
351
351
352 def commit(self, files=None, text="", user=None, date=None,
352 def commit(self, files=None, text="", user=None, date=None,
353 match=util.always, force=False, wlock=None):
353 match=util.always, force=False, wlock=None):
354 commit = []
354 commit = []
355 remove = []
355 remove = []
356 changed = []
356 changed = []
357
357
358 if files:
358 if files:
359 for f in files:
359 for f in files:
360 s = self.dirstate.state(f)
360 s = self.dirstate.state(f)
361 if s in 'nmai':
361 if s in 'nmai':
362 commit.append(f)
362 commit.append(f)
363 elif s == 'r':
363 elif s == 'r':
364 remove.append(f)
364 remove.append(f)
365 else:
365 else:
366 self.ui.warn(_("%s not tracked!\n") % f)
366 self.ui.warn(_("%s not tracked!\n") % f)
367 else:
367 else:
368 modified, added, removed, deleted, unknown = self.changes(match=match)
368 modified, added, removed, deleted, unknown = self.changes(match=match)
369 commit = modified + added
369 commit = modified + added
370 remove = removed
370 remove = removed
371
371
372 p1, p2 = self.dirstate.parents()
372 p1, p2 = self.dirstate.parents()
373 c1 = self.changelog.read(p1)
373 c1 = self.changelog.read(p1)
374 c2 = self.changelog.read(p2)
374 c2 = self.changelog.read(p2)
375 m1 = self.manifest.read(c1[0])
375 m1 = self.manifest.read(c1[0])
376 mf1 = self.manifest.readflags(c1[0])
376 mf1 = self.manifest.readflags(c1[0])
377 m2 = self.manifest.read(c2[0])
377 m2 = self.manifest.read(c2[0])
378
378
379 if not commit and not remove and not force and p2 == nullid:
379 if not commit and not remove and not force and p2 == nullid:
380 self.ui.status(_("nothing changed\n"))
380 self.ui.status(_("nothing changed\n"))
381 return None
381 return None
382
382
383 xp1 = hex(p1)
383 xp1 = hex(p1)
384 if p2 == nullid: xp2 = ''
384 if p2 == nullid: xp2 = ''
385 else: xp2 = hex(p2)
385 else: xp2 = hex(p2)
386
386
387 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
387 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
388
388
389 if not wlock:
389 if not wlock:
390 wlock = self.wlock()
390 wlock = self.wlock()
391 lock = self.lock()
391 lock = self.lock()
392 tr = self.transaction()
392 tr = self.transaction()
393
393
394 # check in files
394 # check in files
395 new = {}
395 new = {}
396 linkrev = self.changelog.count()
396 linkrev = self.changelog.count()
397 commit.sort()
397 commit.sort()
398 for f in commit:
398 for f in commit:
399 self.ui.note(f + "\n")
399 self.ui.note(f + "\n")
400 try:
400 try:
401 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
401 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
402 t = self.wread(f)
402 t = self.wread(f)
403 except IOError:
403 except IOError:
404 self.ui.warn(_("trouble committing %s!\n") % f)
404 self.ui.warn(_("trouble committing %s!\n") % f)
405 raise
405 raise
406
406
407 r = self.file(f)
407 r = self.file(f)
408
408
409 meta = {}
409 meta = {}
410 cp = self.dirstate.copied(f)
410 cp = self.dirstate.copied(f)
411 if cp:
411 if cp:
412 meta["copy"] = cp
412 meta["copy"] = cp
413 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
413 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
414 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
414 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
415 fp1, fp2 = nullid, nullid
415 fp1, fp2 = nullid, nullid
416 else:
416 else:
417 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
417 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
418 if entry:
418 if entry:
419 new[f] = entry
419 new[f] = entry
420 continue
420 continue
421
421
422 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
422 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
423 # remember what we've added so that we can later calculate
423 # remember what we've added so that we can later calculate
424 # the files to pull from a set of changesets
424 # the files to pull from a set of changesets
425 changed.append(f)
425 changed.append(f)
426
426
427 # update manifest
427 # update manifest
428 m1 = m1.copy()
428 m1 = m1.copy()
429 m1.update(new)
429 m1.update(new)
430 for f in remove:
430 for f in remove:
431 if f in m1:
431 if f in m1:
432 del m1[f]
432 del m1[f]
433 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
433 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
434 (new, remove))
434 (new, remove))
435
435
436 # add changeset
436 # add changeset
437 new = new.keys()
437 new = new.keys()
438 new.sort()
438 new.sort()
439
439
440 if not text:
440 if not text:
441 edittext = [""]
441 edittext = [""]
442 if p2 != nullid:
442 if p2 != nullid:
443 edittext.append("HG: branch merge")
443 edittext.append("HG: branch merge")
444 edittext.extend(["HG: changed %s" % f for f in changed])
444 edittext.extend(["HG: changed %s" % f for f in changed])
445 edittext.extend(["HG: removed %s" % f for f in remove])
445 edittext.extend(["HG: removed %s" % f for f in remove])
446 if not changed and not remove:
446 if not changed and not remove:
447 edittext.append("HG: no files changed")
447 edittext.append("HG: no files changed")
448 edittext.append("")
448 edittext.append("")
449 # run editor in the repository root
449 # run editor in the repository root
450 olddir = os.getcwd()
450 olddir = os.getcwd()
451 os.chdir(self.root)
451 os.chdir(self.root)
452 edittext = self.ui.edit("\n".join(edittext))
452 edittext = self.ui.edit("\n".join(edittext))
453 os.chdir(olddir)
453 os.chdir(olddir)
454 if not edittext.rstrip():
454 if not edittext.rstrip():
455 return None
455 return None
456 text = edittext
456 text = edittext
457
457
458 user = user or self.ui.username()
458 user = user or self.ui.username()
459 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
459 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
460 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
460 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
461 parent2=xp2)
461 parent2=xp2)
462 tr.close()
462 tr.close()
463
463
464 self.dirstate.setparents(n)
464 self.dirstate.setparents(n)
465 self.dirstate.update(new, "n")
465 self.dirstate.update(new, "n")
466 self.dirstate.forget(remove)
466 self.dirstate.forget(remove)
467
467
468 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
468 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
469 return n
469 return n
470
470
471 def walk(self, node=None, files=[], match=util.always):
471 def walk(self, node=None, files=[], match=util.always):
472 if node:
472 if node:
473 fdict = dict.fromkeys(files)
473 fdict = dict.fromkeys(files)
474 for fn in self.manifest.read(self.changelog.read(node)[0]):
474 for fn in self.manifest.read(self.changelog.read(node)[0]):
475 fdict.pop(fn, None)
475 fdict.pop(fn, None)
476 if match(fn):
476 if match(fn):
477 yield 'm', fn
477 yield 'm', fn
478 for fn in fdict:
478 for fn in fdict:
479 self.ui.warn(_('%s: No such file in rev %s\n') % (
479 self.ui.warn(_('%s: No such file in rev %s\n') % (
480 util.pathto(self.getcwd(), fn), short(node)))
480 util.pathto(self.getcwd(), fn), short(node)))
481 else:
481 else:
482 for src, fn in self.dirstate.walk(files, match):
482 for src, fn in self.dirstate.walk(files, match):
483 yield src, fn
483 yield src, fn
484
484
485 def changes(self, node1=None, node2=None, files=[], match=util.always,
485 def changes(self, node1=None, node2=None, files=[], match=util.always,
486 wlock=None):
486 wlock=None):
487 """return changes between two nodes or node and working directory
487 """return changes between two nodes or node and working directory
488
488
489 If node1 is None, use the first dirstate parent instead.
489 If node1 is None, use the first dirstate parent instead.
490 If node2 is None, compare node1 with working directory.
490 If node2 is None, compare node1 with working directory.
491 """
491 """
492
492
493 def fcmp(fn, mf):
493 def fcmp(fn, mf):
494 t1 = self.wread(fn)
494 t1 = self.wread(fn)
495 t2 = self.file(fn).read(mf.get(fn, nullid))
495 t2 = self.file(fn).read(mf.get(fn, nullid))
496 return cmp(t1, t2)
496 return cmp(t1, t2)
497
497
498 def mfmatches(node):
498 def mfmatches(node):
499 change = self.changelog.read(node)
499 change = self.changelog.read(node)
500 mf = dict(self.manifest.read(change[0]))
500 mf = dict(self.manifest.read(change[0]))
501 for fn in mf.keys():
501 for fn in mf.keys():
502 if not match(fn):
502 if not match(fn):
503 del mf[fn]
503 del mf[fn]
504 return mf
504 return mf
505
505
506 # are we comparing the working directory?
506 # are we comparing the working directory?
507 if not node2:
507 if not node2:
508 if not wlock:
508 if not wlock:
509 try:
509 try:
510 wlock = self.wlock(wait=0)
510 wlock = self.wlock(wait=0)
511 except lock.LockHeld:
511 except lock.LockHeld:
512 wlock = None
512 wlock = None
513 lookup, modified, added, removed, deleted, unknown = (
513 lookup, modified, added, removed, deleted, unknown = (
514 self.dirstate.changes(files, match))
514 self.dirstate.changes(files, match))
515
515
516 # are we comparing working dir against its parent?
516 # are we comparing working dir against its parent?
517 if not node1:
517 if not node1:
518 if lookup:
518 if lookup:
519 # do a full compare of any files that might have changed
519 # do a full compare of any files that might have changed
520 mf2 = mfmatches(self.dirstate.parents()[0])
520 mf2 = mfmatches(self.dirstate.parents()[0])
521 for f in lookup:
521 for f in lookup:
522 if fcmp(f, mf2):
522 if fcmp(f, mf2):
523 modified.append(f)
523 modified.append(f)
524 elif wlock is not None:
524 elif wlock is not None:
525 self.dirstate.update([f], "n")
525 self.dirstate.update([f], "n")
526 else:
526 else:
527 # we are comparing working dir against non-parent
527 # we are comparing working dir against non-parent
528 # generate a pseudo-manifest for the working dir
528 # generate a pseudo-manifest for the working dir
529 mf2 = mfmatches(self.dirstate.parents()[0])
529 mf2 = mfmatches(self.dirstate.parents()[0])
530 for f in lookup + modified + added:
530 for f in lookup + modified + added:
531 mf2[f] = ""
531 mf2[f] = ""
532 for f in removed:
532 for f in removed:
533 if f in mf2:
533 if f in mf2:
534 del mf2[f]
534 del mf2[f]
535 else:
535 else:
536 # we are comparing two revisions
536 # we are comparing two revisions
537 deleted, unknown = [], []
537 deleted, unknown = [], []
538 mf2 = mfmatches(node2)
538 mf2 = mfmatches(node2)
539
539
540 if node1:
540 if node1:
541 # flush lists from dirstate before comparing manifests
541 # flush lists from dirstate before comparing manifests
542 modified, added = [], []
542 modified, added = [], []
543
543
544 mf1 = mfmatches(node1)
544 mf1 = mfmatches(node1)
545
545
546 for fn in mf2:
546 for fn in mf2:
547 if mf1.has_key(fn):
547 if mf1.has_key(fn):
548 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
548 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
549 modified.append(fn)
549 modified.append(fn)
550 del mf1[fn]
550 del mf1[fn]
551 else:
551 else:
552 added.append(fn)
552 added.append(fn)
553
553
554 removed = mf1.keys()
554 removed = mf1.keys()
555
555
556 # sort and return results:
556 # sort and return results:
557 for l in modified, added, removed, deleted, unknown:
557 for l in modified, added, removed, deleted, unknown:
558 l.sort()
558 l.sort()
559 return (modified, added, removed, deleted, unknown)
559 return (modified, added, removed, deleted, unknown)
560
560
561 def add(self, list, wlock=None):
561 def add(self, list, wlock=None):
562 if not wlock:
562 if not wlock:
563 wlock = self.wlock()
563 wlock = self.wlock()
564 for f in list:
564 for f in list:
565 p = self.wjoin(f)
565 p = self.wjoin(f)
566 if not os.path.exists(p):
566 if not os.path.exists(p):
567 self.ui.warn(_("%s does not exist!\n") % f)
567 self.ui.warn(_("%s does not exist!\n") % f)
568 elif not os.path.isfile(p):
568 elif not os.path.isfile(p):
569 self.ui.warn(_("%s not added: only files supported currently\n")
569 self.ui.warn(_("%s not added: only files supported currently\n")
570 % f)
570 % f)
571 elif self.dirstate.state(f) in 'an':
571 elif self.dirstate.state(f) in 'an':
572 self.ui.warn(_("%s already tracked!\n") % f)
572 self.ui.warn(_("%s already tracked!\n") % f)
573 else:
573 else:
574 self.dirstate.update([f], "a")
574 self.dirstate.update([f], "a")
575
575
576 def forget(self, list, wlock=None):
576 def forget(self, list, wlock=None):
577 if not wlock:
577 if not wlock:
578 wlock = self.wlock()
578 wlock = self.wlock()
579 for f in list:
579 for f in list:
580 if self.dirstate.state(f) not in 'ai':
580 if self.dirstate.state(f) not in 'ai':
581 self.ui.warn(_("%s not added!\n") % f)
581 self.ui.warn(_("%s not added!\n") % f)
582 else:
582 else:
583 self.dirstate.forget([f])
583 self.dirstate.forget([f])
584
584
585 def remove(self, list, unlink=False, wlock=None):
585 def remove(self, list, unlink=False, wlock=None):
586 if unlink:
586 if unlink:
587 for f in list:
587 for f in list:
588 try:
588 try:
589 util.unlink(self.wjoin(f))
589 util.unlink(self.wjoin(f))
590 except OSError, inst:
590 except OSError, inst:
591 if inst.errno != errno.ENOENT:
591 if inst.errno != errno.ENOENT:
592 raise
592 raise
593 if not wlock:
593 if not wlock:
594 wlock = self.wlock()
594 wlock = self.wlock()
595 for f in list:
595 for f in list:
596 p = self.wjoin(f)
596 p = self.wjoin(f)
597 if os.path.exists(p):
597 if os.path.exists(p):
598 self.ui.warn(_("%s still exists!\n") % f)
598 self.ui.warn(_("%s still exists!\n") % f)
599 elif self.dirstate.state(f) == 'a':
599 elif self.dirstate.state(f) == 'a':
600 self.dirstate.forget([f])
600 self.dirstate.forget([f])
601 elif f not in self.dirstate:
601 elif f not in self.dirstate:
602 self.ui.warn(_("%s not tracked!\n") % f)
602 self.ui.warn(_("%s not tracked!\n") % f)
603 else:
603 else:
604 self.dirstate.update([f], "r")
604 self.dirstate.update([f], "r")
605
605
606 def undelete(self, list, wlock=None):
606 def undelete(self, list, wlock=None):
607 p = self.dirstate.parents()[0]
607 p = self.dirstate.parents()[0]
608 mn = self.changelog.read(p)[0]
608 mn = self.changelog.read(p)[0]
609 mf = self.manifest.readflags(mn)
609 mf = self.manifest.readflags(mn)
610 m = self.manifest.read(mn)
610 m = self.manifest.read(mn)
611 if not wlock:
611 if not wlock:
612 wlock = self.wlock()
612 wlock = self.wlock()
613 for f in list:
613 for f in list:
614 if self.dirstate.state(f) not in "r":
614 if self.dirstate.state(f) not in "r":
615 self.ui.warn("%s not removed!\n" % f)
615 self.ui.warn("%s not removed!\n" % f)
616 else:
616 else:
617 t = self.file(f).read(m[f])
617 t = self.file(f).read(m[f])
618 self.wwrite(f, t)
618 self.wwrite(f, t)
619 util.set_exec(self.wjoin(f), mf[f])
619 util.set_exec(self.wjoin(f), mf[f])
620 self.dirstate.update([f], "n")
620 self.dirstate.update([f], "n")
621
621
622 def copy(self, source, dest, wlock=None):
622 def copy(self, source, dest, wlock=None):
623 p = self.wjoin(dest)
623 p = self.wjoin(dest)
624 if not os.path.exists(p):
624 if not os.path.exists(p):
625 self.ui.warn(_("%s does not exist!\n") % dest)
625 self.ui.warn(_("%s does not exist!\n") % dest)
626 elif not os.path.isfile(p):
626 elif not os.path.isfile(p):
627 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
627 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
628 else:
628 else:
629 if not wlock:
629 if not wlock:
630 wlock = self.wlock()
630 wlock = self.wlock()
631 if self.dirstate.state(dest) == '?':
631 if self.dirstate.state(dest) == '?':
632 self.dirstate.update([dest], "a")
632 self.dirstate.update([dest], "a")
633 self.dirstate.copy(source, dest)
633 self.dirstate.copy(source, dest)
634
634
635 def heads(self, start=None):
635 def heads(self, start=None):
636 heads = self.changelog.heads(start)
636 heads = self.changelog.heads(start)
637 # sort the output in rev descending order
637 # sort the output in rev descending order
638 heads = [(-self.changelog.rev(h), h) for h in heads]
638 heads = [(-self.changelog.rev(h), h) for h in heads]
639 heads.sort()
639 heads.sort()
640 return [n for (r, n) in heads]
640 return [n for (r, n) in heads]
641
641
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping heads to the branch tags visible from them.

        See the comment block preceding this method for the full
        semantics.  'heads' defaults to all repository heads; 'branch'
        limits how deep the parent traversal goes (but not which branches
        are returned).
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> {tagged node visible from it: 1}
        merges = []         # (second parent, found-so-far) continuations
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal queued at a merge's second parent;
                # NOTE(review): 'seen' is intentionally not reset here, so
                # the continuation shares the last head's seen set
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            # 'tip' is not a branch name
                            continue
                        # every node found so far can see this tagged node
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending here
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent for a later traversal pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}       # memoizes the transitive-visibility sets
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from 'node' via 'branches'
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag: keep its tags
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
747
747
748 def branches(self, nodes):
748 def branches(self, nodes):
749 if not nodes:
749 if not nodes:
750 nodes = [self.changelog.tip()]
750 nodes = [self.changelog.tip()]
751 b = []
751 b = []
752 for n in nodes:
752 for n in nodes:
753 t = n
753 t = n
754 while n:
754 while n:
755 p = self.changelog.parents(n)
755 p = self.changelog.parents(n)
756 if p[1] != nullid or p[0] == nullid:
756 if p[1] != nullid or p[0] == nullid:
757 b.append((t, n, p[0], p[1]))
757 b.append((t, n, p[0], p[1]))
758 break
758 break
759 n = p[0]
759 n = p[0]
760 return b
760 return b
761
761
762 def between(self, pairs):
762 def between(self, pairs):
763 r = []
763 r = []
764
764
765 for top, bottom in pairs:
765 for top, bottom in pairs:
766 n, l, i = top, [], 0
766 n, l, i = top, [], 0
767 f = 1
767 f = 1
768
768
769 while n != bottom:
769 while n != bottom:
770 p = self.changelog.parents(n)[0]
770 p = self.changelog.parents(n)[0]
771 if i == f:
771 if i == f:
772 l.append(n)
772 l.append(n)
773 f = f * 2
773 f = f * 2
774 n = p
774 n = p
775 i += 1
775 i += 1
776
776
777 r.append(l)
777 r.append(l)
778
778
779 return r
779 return r
780
780
    def findincoming(self, remote, base=None, heads=None):
        """Discover which changesets 'remote' has that we do not.

        Returns the list of earliest-unknown nodes to fetch, or None if
        the remote has nothing new.  If a dict is passed as 'base' it is
        filled in with the latest-known (common) nodes as a side effect.
        'heads' defaults to the remote's heads.
        """
        m = self.changelog.nodemap
        search = []         # branch ranges scheduled for binary search
        fetch = {}          # earliest unknown nodes (the result set)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        # partition the remote heads into known (common) and unknown
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}            # nodes already requested from the remote
        reqcnt = 0          # number of round trips, for debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # the branch root's parents are both known, so
                            # the root itself is the fetch boundary
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # otherwise ask the remote about the root's parents
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch the branch queries ten nodes at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    # found the known/unknown boundary within this sample
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # still too coarse: narrow to (p, i) and re-search
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
902
902
903 def findoutgoing(self, remote, base=None, heads=None):
903 def findoutgoing(self, remote, base=None, heads=None):
904 if base == None:
904 if base == None:
905 base = {}
905 base = {}
906 self.findincoming(remote, base, heads)
906 self.findincoming(remote, base, heads)
907
907
908 self.ui.debug(_("common changesets up to ")
908 self.ui.debug(_("common changesets up to ")
909 + " ".join(map(short, base.keys())) + "\n")
909 + " ".join(map(short, base.keys())) + "\n")
910
910
911 remain = dict.fromkeys(self.changelog.nodemap)
911 remain = dict.fromkeys(self.changelog.nodemap)
912
912
913 # prune everything remote has from the tree
913 # prune everything remote has from the tree
914 del remain[nullid]
914 del remain[nullid]
915 remove = base.keys()
915 remove = base.keys()
916 while remove:
916 while remove:
917 n = remove.pop(0)
917 n = remove.pop(0)
918 if n in remain:
918 if n in remain:
919 del remain[n]
919 del remain[n]
920 for p in self.changelog.parents(n):
920 for p in self.changelog.parents(n):
921 remove.append(p)
921 remove.append(p)
922
922
923 # find every node whose parents have been pruned
923 # find every node whose parents have been pruned
924 subset = []
924 subset = []
925 for n in remain:
925 for n in remain:
926 p1, p2 = self.changelog.parents(n)
926 p1, p2 = self.changelog.parents(n)
927 if p1 not in remain and p2 not in remain:
927 if p1 not in remain and p2 not in remain:
928 subset.append(n)
928 subset.append(n)
929
929
930 # this is the set of all roots we have to push
930 # this is the set of all roots we have to push
931 return subset
931 return subset
932
932
933 def pull(self, remote, heads=None):
933 def pull(self, remote, heads=None):
934 lock = self.lock()
934 lock = self.lock()
935
935
936 # if we have an empty repo, fetch everything
936 # if we have an empty repo, fetch everything
937 if self.changelog.tip() == nullid:
937 if self.changelog.tip() == nullid:
938 self.ui.status(_("requesting all changes\n"))
938 self.ui.status(_("requesting all changes\n"))
939 fetch = [nullid]
939 fetch = [nullid]
940 else:
940 else:
941 fetch = self.findincoming(remote)
941 fetch = self.findincoming(remote)
942
942
943 if not fetch:
943 if not fetch:
944 self.ui.status(_("no changes found\n"))
944 self.ui.status(_("no changes found\n"))
945 return 1
945 return 1
946
946
947 if heads is None:
947 if heads is None:
948 cg = remote.changegroup(fetch, 'pull')
948 cg = remote.changegroup(fetch, 'pull')
949 else:
949 else:
950 cg = remote.changegroupsubset(fetch, heads, 'pull')
950 cg = remote.changegroupsubset(fetch, heads, 'pull')
951 return self.addchangegroup(cg)
951 return self.addchangegroup(cg)
952
952
953 def push(self, remote, force=False):
953 def push(self, remote, force=False):
954 lock = remote.lock()
954 lock = remote.lock()
955
955
956 base = {}
956 base = {}
957 heads = remote.heads()
957 heads = remote.heads()
958 inc = self.findincoming(remote, base, heads)
958 inc = self.findincoming(remote, base, heads)
959 if not force and inc:
959 if not force and inc:
960 self.ui.warn(_("abort: unsynced remote changes!\n"))
960 self.ui.warn(_("abort: unsynced remote changes!\n"))
961 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
961 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
962 return 1
962 return 1
963
963
964 update = self.findoutgoing(remote, base)
964 update = self.findoutgoing(remote, base)
965 if not update:
965 if not update:
966 self.ui.status(_("no changes found\n"))
966 self.ui.status(_("no changes found\n"))
967 return 1
967 return 1
968 elif not force:
968 elif not force:
969 if len(heads) < len(self.changelog.heads()):
969 if len(heads) < len(self.changelog.heads()):
970 self.ui.warn(_("abort: push creates new remote branches!\n"))
970 self.ui.warn(_("abort: push creates new remote branches!\n"))
971 self.ui.status(_("(did you forget to merge?"
971 self.ui.status(_("(did you forget to merge?"
972 " use push -f to force)\n"))
972 " use push -f to force)\n"))
973 return 1
973 return 1
974
974
975 cg = self.changegroup(update, 'push')
975 cg = self.changegroup(update, 'push')
976 return remote.addchangegroup(cg)
976 return remote.addchangegroup(cg)
977
977
978 def changegroupsubset(self, bases, heads, source):
978 def changegroupsubset(self, bases, heads, source):
979 """This function generates a changegroup consisting of all the nodes
979 """This function generates a changegroup consisting of all the nodes
980 that are descendents of any of the bases, and ancestors of any of
980 that are descendents of any of the bases, and ancestors of any of
981 the heads.
981 the heads.
982
982
983 It is fairly complex as determining which filenodes and which
983 It is fairly complex as determining which filenodes and which
984 manifest nodes need to be included for the changeset to be complete
984 manifest nodes need to be included for the changeset to be complete
985 is non-trivial.
985 is non-trivial.
986
986
987 Another wrinkle is doing the reverse, figuring out which changeset in
987 Another wrinkle is doing the reverse, figuring out which changeset in
988 the changegroup a particular filenode or manifestnode belongs to."""
988 the changegroup a particular filenode or manifestnode belongs to."""
989
989
990 self.hook('preoutgoing', throw=True, source=source)
990 self.hook('preoutgoing', throw=True, source=source)
991
991
992 # Set up some initial variables
992 # Set up some initial variables
993 # Make it easy to refer to self.changelog
993 # Make it easy to refer to self.changelog
994 cl = self.changelog
994 cl = self.changelog
995 # msng is short for missing - compute the list of changesets in this
995 # msng is short for missing - compute the list of changesets in this
996 # changegroup.
996 # changegroup.
997 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
997 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
998 # Some bases may turn out to be superfluous, and some heads may be
998 # Some bases may turn out to be superfluous, and some heads may be
999 # too. nodesbetween will return the minimal set of bases and heads
999 # too. nodesbetween will return the minimal set of bases and heads
1000 # necessary to re-create the changegroup.
1000 # necessary to re-create the changegroup.
1001
1001
1002 # Known heads are the list of heads that it is assumed the recipient
1002 # Known heads are the list of heads that it is assumed the recipient
1003 # of this changegroup will know about.
1003 # of this changegroup will know about.
1004 knownheads = {}
1004 knownheads = {}
1005 # We assume that all parents of bases are known heads.
1005 # We assume that all parents of bases are known heads.
1006 for n in bases:
1006 for n in bases:
1007 for p in cl.parents(n):
1007 for p in cl.parents(n):
1008 if p != nullid:
1008 if p != nullid:
1009 knownheads[p] = 1
1009 knownheads[p] = 1
1010 knownheads = knownheads.keys()
1010 knownheads = knownheads.keys()
1011 if knownheads:
1011 if knownheads:
1012 # Now that we know what heads are known, we can compute which
1012 # Now that we know what heads are known, we can compute which
1013 # changesets are known. The recipient must know about all
1013 # changesets are known. The recipient must know about all
1014 # changesets required to reach the known heads from the null
1014 # changesets required to reach the known heads from the null
1015 # changeset.
1015 # changeset.
1016 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1016 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1017 junk = None
1017 junk = None
1018 # Transform the list into an ersatz set.
1018 # Transform the list into an ersatz set.
1019 has_cl_set = dict.fromkeys(has_cl_set)
1019 has_cl_set = dict.fromkeys(has_cl_set)
1020 else:
1020 else:
1021 # If there were no known heads, the recipient cannot be assumed to
1021 # If there were no known heads, the recipient cannot be assumed to
1022 # know about any changesets.
1022 # know about any changesets.
1023 has_cl_set = {}
1023 has_cl_set = {}
1024
1024
1025 # Make it easy to refer to self.manifest
1025 # Make it easy to refer to self.manifest
1026 mnfst = self.manifest
1026 mnfst = self.manifest
1027 # We don't know which manifests are missing yet
1027 # We don't know which manifests are missing yet
1028 msng_mnfst_set = {}
1028 msng_mnfst_set = {}
1029 # Nor do we know which filenodes are missing.
1029 # Nor do we know which filenodes are missing.
1030 msng_filenode_set = {}
1030 msng_filenode_set = {}
1031
1031
1032 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1032 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1033 junk = None
1033 junk = None
1034
1034
1035 # A changeset always belongs to itself, so the changenode lookup
1035 # A changeset always belongs to itself, so the changenode lookup
1036 # function for a changenode is identity.
1036 # function for a changenode is identity.
1037 def identity(x):
1037 def identity(x):
1038 return x
1038 return x
1039
1039
1040 # A function generating function. Sets up an environment for the
1040 # A function generating function. Sets up an environment for the
1041 # inner function.
1041 # inner function.
1042 def cmp_by_rev_func(revlog):
1042 def cmp_by_rev_func(revlog):
1043 # Compare two nodes by their revision number in the environment's
1043 # Compare two nodes by their revision number in the environment's
1044 # revision history. Since the revision number both represents the
1044 # revision history. Since the revision number both represents the
1045 # most efficient order to read the nodes in, and represents a
1045 # most efficient order to read the nodes in, and represents a
1046 # topological sorting of the nodes, this function is often useful.
1046 # topological sorting of the nodes, this function is often useful.
1047 def cmp_by_rev(a, b):
1047 def cmp_by_rev(a, b):
1048 return cmp(revlog.rev(a), revlog.rev(b))
1048 return cmp(revlog.rev(a), revlog.rev(b))
1049 return cmp_by_rev
1049 return cmp_by_rev
1050
1050
1051 # If we determine that a particular file or manifest node must be a
1051 # If we determine that a particular file or manifest node must be a
1052 # node that the recipient of the changegroup will already have, we can
1052 # node that the recipient of the changegroup will already have, we can
1053 # also assume the recipient will have all the parents. This function
1053 # also assume the recipient will have all the parents. This function
1054 # prunes them from the set of missing nodes.
1054 # prunes them from the set of missing nodes.
1055 def prune_parents(revlog, hasset, msngset):
1055 def prune_parents(revlog, hasset, msngset):
1056 haslst = hasset.keys()
1056 haslst = hasset.keys()
1057 haslst.sort(cmp_by_rev_func(revlog))
1057 haslst.sort(cmp_by_rev_func(revlog))
1058 for node in haslst:
1058 for node in haslst:
1059 parentlst = [p for p in revlog.parents(node) if p != nullid]
1059 parentlst = [p for p in revlog.parents(node) if p != nullid]
1060 while parentlst:
1060 while parentlst:
1061 n = parentlst.pop()
1061 n = parentlst.pop()
1062 if n not in hasset:
1062 if n not in hasset:
1063 hasset[n] = 1
1063 hasset[n] = 1
1064 p = [p for p in revlog.parents(n) if p != nullid]
1064 p = [p for p in revlog.parents(n) if p != nullid]
1065 parentlst.extend(p)
1065 parentlst.extend(p)
1066 for n in hasset:
1066 for n in hasset:
1067 msngset.pop(n, None)
1067 msngset.pop(n, None)
1068
1068
1069 # This is a function generating function used to set up an environment
1069 # This is a function generating function used to set up an environment
1070 # for the inner function to execute in.
1070 # for the inner function to execute in.
1071 def manifest_and_file_collector(changedfileset):
1071 def manifest_and_file_collector(changedfileset):
1072 # This is an information gathering function that gathers
1072 # This is an information gathering function that gathers
1073 # information from each changeset node that goes out as part of
1073 # information from each changeset node that goes out as part of
1074 # the changegroup. The information gathered is a list of which
1074 # the changegroup. The information gathered is a list of which
1075 # manifest nodes are potentially required (the recipient may
1075 # manifest nodes are potentially required (the recipient may
1076 # already have them) and total list of all files which were
1076 # already have them) and total list of all files which were
1077 # changed in any changeset in the changegroup.
1077 # changed in any changeset in the changegroup.
1078 #
1078 #
1079 # We also remember the first changenode we saw any manifest
1079 # We also remember the first changenode we saw any manifest
1080 # referenced by so we can later determine which changenode 'owns'
1080 # referenced by so we can later determine which changenode 'owns'
1081 # the manifest.
1081 # the manifest.
1082 def collect_manifests_and_files(clnode):
1082 def collect_manifests_and_files(clnode):
1083 c = cl.read(clnode)
1083 c = cl.read(clnode)
1084 for f in c[3]:
1084 for f in c[3]:
1085 # This is to make sure we only have one instance of each
1085 # This is to make sure we only have one instance of each
1086 # filename string for each filename.
1086 # filename string for each filename.
1087 changedfileset.setdefault(f, f)
1087 changedfileset.setdefault(f, f)
1088 msng_mnfst_set.setdefault(c[0], clnode)
1088 msng_mnfst_set.setdefault(c[0], clnode)
1089 return collect_manifests_and_files
1089 return collect_manifests_and_files
1090
1090
1091 # Figure out which manifest nodes (of the ones we think might be part
1091 # Figure out which manifest nodes (of the ones we think might be part
1092 # of the changegroup) the recipient must know about and remove them
1092 # of the changegroup) the recipient must know about and remove them
1093 # from the changegroup.
1093 # from the changegroup.
1094 def prune_manifests():
1094 def prune_manifests():
1095 has_mnfst_set = {}
1095 has_mnfst_set = {}
1096 for n in msng_mnfst_set:
1096 for n in msng_mnfst_set:
1097 # If a 'missing' manifest thinks it belongs to a changenode
1097 # If a 'missing' manifest thinks it belongs to a changenode
1098 # the recipient is assumed to have, obviously the recipient
1098 # the recipient is assumed to have, obviously the recipient
1099 # must have that manifest.
1099 # must have that manifest.
1100 linknode = cl.node(mnfst.linkrev(n))
1100 linknode = cl.node(mnfst.linkrev(n))
1101 if linknode in has_cl_set:
1101 if linknode in has_cl_set:
1102 has_mnfst_set[n] = 1
1102 has_mnfst_set[n] = 1
1103 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1103 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1104
1104
1105 # Use the information collected in collect_manifests_and_files to say
1105 # Use the information collected in collect_manifests_and_files to say
1106 # which changenode any manifestnode belongs to.
1106 # which changenode any manifestnode belongs to.
1107 def lookup_manifest_link(mnfstnode):
1107 def lookup_manifest_link(mnfstnode):
1108 return msng_mnfst_set[mnfstnode]
1108 return msng_mnfst_set[mnfstnode]
1109
1109
1110 # A function generating function that sets up the initial environment
1110 # A function generating function that sets up the initial environment
1111 # the inner function.
1111 # the inner function.
1112 def filenode_collector(changedfiles):
1112 def filenode_collector(changedfiles):
1113 next_rev = [0]
1113 next_rev = [0]
1114 # This gathers information from each manifestnode included in the
1114 # This gathers information from each manifestnode included in the
1115 # changegroup about which filenodes the manifest node references
1115 # changegroup about which filenodes the manifest node references
1116 # so we can include those in the changegroup too.
1116 # so we can include those in the changegroup too.
1117 #
1117 #
1118 # It also remembers which changenode each filenode belongs to. It
1118 # It also remembers which changenode each filenode belongs to. It
1119 # does this by assuming the a filenode belongs to the changenode
1119 # does this by assuming the a filenode belongs to the changenode
1120 # the first manifest that references it belongs to.
1120 # the first manifest that references it belongs to.
1121 def collect_msng_filenodes(mnfstnode):
1121 def collect_msng_filenodes(mnfstnode):
1122 r = mnfst.rev(mnfstnode)
1122 r = mnfst.rev(mnfstnode)
1123 if r == next_rev[0]:
1123 if r == next_rev[0]:
1124 # If the last rev we looked at was the one just previous,
1124 # If the last rev we looked at was the one just previous,
1125 # we only need to see a diff.
1125 # we only need to see a diff.
1126 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1126 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1127 # For each line in the delta
1127 # For each line in the delta
1128 for dline in delta.splitlines():
1128 for dline in delta.splitlines():
1129 # get the filename and filenode for that line
1129 # get the filename and filenode for that line
1130 f, fnode = dline.split('\0')
1130 f, fnode = dline.split('\0')
1131 fnode = bin(fnode[:40])
1131 fnode = bin(fnode[:40])
1132 f = changedfiles.get(f, None)
1132 f = changedfiles.get(f, None)
1133 # And if the file is in the list of files we care
1133 # And if the file is in the list of files we care
1134 # about.
1134 # about.
1135 if f is not None:
1135 if f is not None:
1136 # Get the changenode this manifest belongs to
1136 # Get the changenode this manifest belongs to
1137 clnode = msng_mnfst_set[mnfstnode]
1137 clnode = msng_mnfst_set[mnfstnode]
1138 # Create the set of filenodes for the file if
1138 # Create the set of filenodes for the file if
1139 # there isn't one already.
1139 # there isn't one already.
1140 ndset = msng_filenode_set.setdefault(f, {})
1140 ndset = msng_filenode_set.setdefault(f, {})
1141 # And set the filenode's changelog node to the
1141 # And set the filenode's changelog node to the
1142 # manifest's if it hasn't been set already.
1142 # manifest's if it hasn't been set already.
1143 ndset.setdefault(fnode, clnode)
1143 ndset.setdefault(fnode, clnode)
1144 else:
1144 else:
1145 # Otherwise we need a full manifest.
1145 # Otherwise we need a full manifest.
1146 m = mnfst.read(mnfstnode)
1146 m = mnfst.read(mnfstnode)
1147 # For every file in we care about.
1147 # For every file in we care about.
1148 for f in changedfiles:
1148 for f in changedfiles:
1149 fnode = m.get(f, None)
1149 fnode = m.get(f, None)
1150 # If it's in the manifest
1150 # If it's in the manifest
1151 if fnode is not None:
1151 if fnode is not None:
1152 # See comments above.
1152 # See comments above.
1153 clnode = msng_mnfst_set[mnfstnode]
1153 clnode = msng_mnfst_set[mnfstnode]
1154 ndset = msng_filenode_set.setdefault(f, {})
1154 ndset = msng_filenode_set.setdefault(f, {})
1155 ndset.setdefault(fnode, clnode)
1155 ndset.setdefault(fnode, clnode)
1156 # Remember the revision we hope to see next.
1156 # Remember the revision we hope to see next.
1157 next_rev[0] = r + 1
1157 next_rev[0] = r + 1
1158 return collect_msng_filenodes
1158 return collect_msng_filenodes
1159
1159
1160 # We have a list of filenodes we think we need for a file, lets remove
1160 # We have a list of filenodes we think we need for a file, lets remove
1161 # all those we now the recipient must have.
1161 # all those we now the recipient must have.
1162 def prune_filenodes(f, filerevlog):
1162 def prune_filenodes(f, filerevlog):
1163 msngset = msng_filenode_set[f]
1163 msngset = msng_filenode_set[f]
1164 hasset = {}
1164 hasset = {}
1165 # If a 'missing' filenode thinks it belongs to a changenode we
1165 # If a 'missing' filenode thinks it belongs to a changenode we
1166 # assume the recipient must have, then the recipient must have
1166 # assume the recipient must have, then the recipient must have
1167 # that filenode.
1167 # that filenode.
1168 for n in msngset:
1168 for n in msngset:
1169 clnode = cl.node(filerevlog.linkrev(n))
1169 clnode = cl.node(filerevlog.linkrev(n))
1170 if clnode in has_cl_set:
1170 if clnode in has_cl_set:
1171 hasset[n] = 1
1171 hasset[n] = 1
1172 prune_parents(filerevlog, hasset, msngset)
1172 prune_parents(filerevlog, hasset, msngset)
1173
1173
1174 # A function generator function that sets up the a context for the
1174 # A function generator function that sets up the a context for the
1175 # inner function.
1175 # inner function.
1176 def lookup_filenode_link_func(fname):
1176 def lookup_filenode_link_func(fname):
1177 msngset = msng_filenode_set[fname]
1177 msngset = msng_filenode_set[fname]
1178 # Lookup the changenode the filenode belongs to.
1178 # Lookup the changenode the filenode belongs to.
1179 def lookup_filenode_link(fnode):
1179 def lookup_filenode_link(fnode):
1180 return msngset[fnode]
1180 return msngset[fnode]
1181 return lookup_filenode_link
1181 return lookup_filenode_link
1182
1182
1183 # Now that we have all theses utility functions to help out and
1183 # Now that we have all theses utility functions to help out and
1184 # logically divide up the task, generate the group.
1184 # logically divide up the task, generate the group.
1185 def gengroup():
1185 def gengroup():
1186 # The set of changed files starts empty.
1186 # The set of changed files starts empty.
1187 changedfiles = {}
1187 changedfiles = {}
1188 # Create a changenode group generator that will call our functions
1188 # Create a changenode group generator that will call our functions
1189 # back to lookup the owning changenode and collect information.
1189 # back to lookup the owning changenode and collect information.
1190 group = cl.group(msng_cl_lst, identity,
1190 group = cl.group(msng_cl_lst, identity,
1191 manifest_and_file_collector(changedfiles))
1191 manifest_and_file_collector(changedfiles))
1192 for chnk in group:
1192 for chnk in group:
1193 yield chnk
1193 yield chnk
1194
1194
1195 # The list of manifests has been collected by the generator
1195 # The list of manifests has been collected by the generator
1196 # calling our functions back.
1196 # calling our functions back.
1197 prune_manifests()
1197 prune_manifests()
1198 msng_mnfst_lst = msng_mnfst_set.keys()
1198 msng_mnfst_lst = msng_mnfst_set.keys()
1199 # Sort the manifestnodes by revision number.
1199 # Sort the manifestnodes by revision number.
1200 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1200 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1201 # Create a generator for the manifestnodes that calls our lookup
1201 # Create a generator for the manifestnodes that calls our lookup
1202 # and data collection functions back.
1202 # and data collection functions back.
1203 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1203 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1204 filenode_collector(changedfiles))
1204 filenode_collector(changedfiles))
1205 for chnk in group:
1205 for chnk in group:
1206 yield chnk
1206 yield chnk
1207
1207
1208 # These are no longer needed, dereference and toss the memory for
1208 # These are no longer needed, dereference and toss the memory for
1209 # them.
1209 # them.
1210 msng_mnfst_lst = None
1210 msng_mnfst_lst = None
1211 msng_mnfst_set.clear()
1211 msng_mnfst_set.clear()
1212
1212
1213 changedfiles = changedfiles.keys()
1213 changedfiles = changedfiles.keys()
1214 changedfiles.sort()
1214 changedfiles.sort()
1215 # Go through all our files in order sorted by name.
1215 # Go through all our files in order sorted by name.
1216 for fname in changedfiles:
1216 for fname in changedfiles:
1217 filerevlog = self.file(fname)
1217 filerevlog = self.file(fname)
1218 # Toss out the filenodes that the recipient isn't really
1218 # Toss out the filenodes that the recipient isn't really
1219 # missing.
1219 # missing.
1220 if msng_filenode_set.has_key(fname):
1220 if msng_filenode_set.has_key(fname):
1221 prune_filenodes(fname, filerevlog)
1221 prune_filenodes(fname, filerevlog)
1222 msng_filenode_lst = msng_filenode_set[fname].keys()
1222 msng_filenode_lst = msng_filenode_set[fname].keys()
1223 else:
1223 else:
1224 msng_filenode_lst = []
1224 msng_filenode_lst = []
1225 # If any filenodes are left, generate the group for them,
1225 # If any filenodes are left, generate the group for them,
1226 # otherwise don't bother.
1226 # otherwise don't bother.
1227 if len(msng_filenode_lst) > 0:
1227 if len(msng_filenode_lst) > 0:
1228 yield struct.pack(">l", len(fname) + 4) + fname
1228 yield struct.pack(">l", len(fname) + 4) + fname
1229 # Sort the filenodes by their revision #
1229 # Sort the filenodes by their revision #
1230 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1230 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1231 # Create a group generator and only pass in a changenode
1231 # Create a group generator and only pass in a changenode
1232 # lookup function as we need to collect no information
1232 # lookup function as we need to collect no information
1233 # from filenodes.
1233 # from filenodes.
1234 group = filerevlog.group(msng_filenode_lst,
1234 group = filerevlog.group(msng_filenode_lst,
1235 lookup_filenode_link_func(fname))
1235 lookup_filenode_link_func(fname))
1236 for chnk in group:
1236 for chnk in group:
1237 yield chnk
1237 yield chnk
1238 if msng_filenode_set.has_key(fname):
1238 if msng_filenode_set.has_key(fname):
1239 # Don't need this anymore, toss it to free memory.
1239 # Don't need this anymore, toss it to free memory.
1240 del msng_filenode_set[fname]
1240 del msng_filenode_set[fname]
1241 # Signal that no more groups are left.
1241 # Signal that no more groups are left.
1242 yield struct.pack(">l", 0)
1242 yield struct.pack(">l", 0)
1243
1243
1244 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1244 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1245
1245
1246 return util.chunkbuffer(gengroup())
1246 return util.chunkbuffer(gengroup())
1247
1247
1248 def changegroup(self, basenodes, source):
1248 def changegroup(self, basenodes, source):
1249 """Generate a changegroup of all nodes that we have that a recipient
1249 """Generate a changegroup of all nodes that we have that a recipient
1250 doesn't.
1250 doesn't.
1251
1251
1252 This is much easier than the previous function as we can assume that
1252 This is much easier than the previous function as we can assume that
1253 the recipient has any changenode we aren't sending them."""
1253 the recipient has any changenode we aren't sending them."""
1254
1254
1255 self.hook('preoutgoing', throw=True, source=source)
1255 self.hook('preoutgoing', throw=True, source=source)
1256
1256
1257 cl = self.changelog
1257 cl = self.changelog
1258 nodes = cl.nodesbetween(basenodes, None)[0]
1258 nodes = cl.nodesbetween(basenodes, None)[0]
1259 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1259 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1260
1260
1261 def identity(x):
1261 def identity(x):
1262 return x
1262 return x
1263
1263
1264 def gennodelst(revlog):
1264 def gennodelst(revlog):
1265 for r in xrange(0, revlog.count()):
1265 for r in xrange(0, revlog.count()):
1266 n = revlog.node(r)
1266 n = revlog.node(r)
1267 if revlog.linkrev(n) in revset:
1267 if revlog.linkrev(n) in revset:
1268 yield n
1268 yield n
1269
1269
1270 def changed_file_collector(changedfileset):
1270 def changed_file_collector(changedfileset):
1271 def collect_changed_files(clnode):
1271 def collect_changed_files(clnode):
1272 c = cl.read(clnode)
1272 c = cl.read(clnode)
1273 for fname in c[3]:
1273 for fname in c[3]:
1274 changedfileset[fname] = 1
1274 changedfileset[fname] = 1
1275 return collect_changed_files
1275 return collect_changed_files
1276
1276
1277 def lookuprevlink_func(revlog):
1277 def lookuprevlink_func(revlog):
1278 def lookuprevlink(n):
1278 def lookuprevlink(n):
1279 return cl.node(revlog.linkrev(n))
1279 return cl.node(revlog.linkrev(n))
1280 return lookuprevlink
1280 return lookuprevlink
1281
1281
1282 def gengroup():
1282 def gengroup():
1283 # construct a list of all changed files
1283 # construct a list of all changed files
1284 changedfiles = {}
1284 changedfiles = {}
1285
1285
1286 for chnk in cl.group(nodes, identity,
1286 for chnk in cl.group(nodes, identity,
1287 changed_file_collector(changedfiles)):
1287 changed_file_collector(changedfiles)):
1288 yield chnk
1288 yield chnk
1289 changedfiles = changedfiles.keys()
1289 changedfiles = changedfiles.keys()
1290 changedfiles.sort()
1290 changedfiles.sort()
1291
1291
1292 mnfst = self.manifest
1292 mnfst = self.manifest
1293 nodeiter = gennodelst(mnfst)
1293 nodeiter = gennodelst(mnfst)
1294 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1294 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1295 yield chnk
1295 yield chnk
1296
1296
1297 for fname in changedfiles:
1297 for fname in changedfiles:
1298 filerevlog = self.file(fname)
1298 filerevlog = self.file(fname)
1299 nodeiter = gennodelst(filerevlog)
1299 nodeiter = gennodelst(filerevlog)
1300 nodeiter = list(nodeiter)
1300 nodeiter = list(nodeiter)
1301 if nodeiter:
1301 if nodeiter:
1302 yield struct.pack(">l", len(fname) + 4) + fname
1302 yield struct.pack(">l", len(fname) + 4) + fname
1303 lookup = lookuprevlink_func(filerevlog)
1303 lookup = lookuprevlink_func(filerevlog)
1304 for chnk in filerevlog.group(nodeiter, lookup):
1304 for chnk in filerevlog.group(nodeiter, lookup):
1305 yield chnk
1305 yield chnk
1306
1306
1307 yield struct.pack(">l", 0)
1307 yield struct.pack(">l", 0)
1308 self.hook('outgoing', node=hex(nodes[0]), source=source)
1308 self.hook('outgoing', node=hex(nodes[0]), source=source)
1309
1309
1310 return util.chunkbuffer(gengroup())
1310 return util.chunkbuffer(gengroup())
1311
1311
1312 def addchangegroup(self, source):
1312 def addchangegroup(self, source):
1313
1313
1314 def getchunk():
1314 def getchunk():
1315 d = source.read(4)
1315 d = source.read(4)
1316 if not d:
1316 if not d:
1317 return ""
1317 return ""
1318 l = struct.unpack(">l", d)[0]
1318 l = struct.unpack(">l", d)[0]
1319 if l <= 4:
1319 if l <= 4:
1320 return ""
1320 return ""
1321 d = source.read(l - 4)
1321 d = source.read(l - 4)
1322 if len(d) < l - 4:
1322 if len(d) < l - 4:
1323 raise repo.RepoError(_("premature EOF reading chunk"
1323 raise repo.RepoError(_("premature EOF reading chunk"
1324 " (got %d bytes, expected %d)")
1324 " (got %d bytes, expected %d)")
1325 % (len(d), l - 4))
1325 % (len(d), l - 4))
1326 return d
1326 return d
1327
1327
1328 def getgroup():
1328 def getgroup():
1329 while 1:
1329 while 1:
1330 c = getchunk()
1330 c = getchunk()
1331 if not c:
1331 if not c:
1332 break
1332 break
1333 yield c
1333 yield c
1334
1334
1335 def csmap(x):
1335 def csmap(x):
1336 self.ui.debug(_("add changeset %s\n") % short(x))
1336 self.ui.debug(_("add changeset %s\n") % short(x))
1337 return self.changelog.count()
1337 return self.changelog.count()
1338
1338
1339 def revmap(x):
1339 def revmap(x):
1340 return self.changelog.rev(x)
1340 return self.changelog.rev(x)
1341
1341
1342 if not source:
1342 if not source:
1343 return
1343 return
1344
1344
1345 self.hook('prechangegroup', throw=True)
1345 self.hook('prechangegroup', throw=True)
1346
1346
1347 changesets = files = revisions = 0
1347 changesets = files = revisions = 0
1348
1348
1349 tr = self.transaction()
1349 tr = self.transaction()
1350
1350
1351 oldheads = len(self.changelog.heads())
1351 oldheads = len(self.changelog.heads())
1352
1352
1353 # pull off the changeset group
1353 # pull off the changeset group
1354 self.ui.status(_("adding changesets\n"))
1354 self.ui.status(_("adding changesets\n"))
1355 co = self.changelog.tip()
1355 co = self.changelog.tip()
1356 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1356 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1357 cnr, cor = map(self.changelog.rev, (cn, co))
1357 cnr, cor = map(self.changelog.rev, (cn, co))
1358 if cn == nullid:
1358 if cn == nullid:
1359 cnr = cor
1359 cnr = cor
1360 changesets = cnr - cor
1360 changesets = cnr - cor
1361
1361
1362 # pull off the manifest group
1362 # pull off the manifest group
1363 self.ui.status(_("adding manifests\n"))
1363 self.ui.status(_("adding manifests\n"))
1364 mm = self.manifest.tip()
1364 mm = self.manifest.tip()
1365 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1365 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1366
1366
1367 # process the files
1367 # process the files
1368 self.ui.status(_("adding file changes\n"))
1368 self.ui.status(_("adding file changes\n"))
1369 while 1:
1369 while 1:
1370 f = getchunk()
1370 f = getchunk()
1371 if not f:
1371 if not f:
1372 break
1372 break
1373 self.ui.debug(_("adding %s revisions\n") % f)
1373 self.ui.debug(_("adding %s revisions\n") % f)
1374 fl = self.file(f)
1374 fl = self.file(f)
1375 o = fl.count()
1375 o = fl.count()
1376 n = fl.addgroup(getgroup(), revmap, tr)
1376 n = fl.addgroup(getgroup(), revmap, tr)
1377 revisions += fl.count() - o
1377 revisions += fl.count() - o
1378 files += 1
1378 files += 1
1379
1379
1380 newheads = len(self.changelog.heads())
1380 newheads = len(self.changelog.heads())
1381 heads = ""
1381 heads = ""
1382 if oldheads and newheads > oldheads:
1382 if oldheads and newheads > oldheads:
1383 heads = _(" (+%d heads)") % (newheads - oldheads)
1383 heads = _(" (+%d heads)") % (newheads - oldheads)
1384
1384
1385 self.ui.status(_("added %d changesets"
1385 self.ui.status(_("added %d changesets"
1386 " with %d changes to %d files%s\n")
1386 " with %d changes to %d files%s\n")
1387 % (changesets, revisions, files, heads))
1387 % (changesets, revisions, files, heads))
1388
1388
1389 self.hook('pretxnchangegroup', throw=True,
1389 self.hook('pretxnchangegroup', throw=True,
1390 node=hex(self.changelog.node(cor+1)))
1390 node=hex(self.changelog.node(cor+1)))
1391
1391
1392 tr.close()
1392 tr.close()
1393
1393
1394 if changesets > 0:
1394 if changesets > 0:
1395 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1395 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1396
1396
1397 for i in range(cor + 1, cnr + 1):
1397 for i in range(cor + 1, cnr + 1):
1398 self.hook("incoming", node=hex(self.changelog.node(i)))
1398 self.hook("incoming", node=hex(self.changelog.node(i)))
1399
1399
1400 def update(self, node, allow=False, force=False, choose=None,
1400 def update(self, node, allow=False, force=False, choose=None,
1401 moddirstate=True, forcemerge=False, wlock=None):
1401 moddirstate=True, forcemerge=False, wlock=None):
1402 pl = self.dirstate.parents()
1402 pl = self.dirstate.parents()
1403 if not force and pl[1] != nullid:
1403 if not force and pl[1] != nullid:
1404 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1404 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1405 return 1
1405 return 1
1406
1406
1407 err = False
1407 err = False
1408
1408
1409 p1, p2 = pl[0], node
1409 p1, p2 = pl[0], node
1410 pa = self.changelog.ancestor(p1, p2)
1410 pa = self.changelog.ancestor(p1, p2)
1411 m1n = self.changelog.read(p1)[0]
1411 m1n = self.changelog.read(p1)[0]
1412 m2n = self.changelog.read(p2)[0]
1412 m2n = self.changelog.read(p2)[0]
1413 man = self.manifest.ancestor(m1n, m2n)
1413 man = self.manifest.ancestor(m1n, m2n)
1414 m1 = self.manifest.read(m1n)
1414 m1 = self.manifest.read(m1n)
1415 mf1 = self.manifest.readflags(m1n)
1415 mf1 = self.manifest.readflags(m1n)
1416 m2 = self.manifest.read(m2n).copy()
1416 m2 = self.manifest.read(m2n).copy()
1417 mf2 = self.manifest.readflags(m2n)
1417 mf2 = self.manifest.readflags(m2n)
1418 ma = self.manifest.read(man)
1418 ma = self.manifest.read(man)
1419 mfa = self.manifest.readflags(man)
1419 mfa = self.manifest.readflags(man)
1420
1420
1421 modified, added, removed, deleted, unknown = self.changes()
1421 modified, added, removed, deleted, unknown = self.changes()
1422
1422
1423 # is this a jump, or a merge? i.e. is there a linear path
1423 # is this a jump, or a merge? i.e. is there a linear path
1424 # from p1 to p2?
1424 # from p1 to p2?
1425 linear_path = (pa == p1 or pa == p2)
1425 linear_path = (pa == p1 or pa == p2)
1426
1426
1427 if allow and linear_path:
1427 if allow and linear_path:
1428 raise util.Abort(_("there is nothing to merge, "
1428 raise util.Abort(_("there is nothing to merge, "
1429 "just use 'hg update'"))
1429 "just use 'hg update'"))
1430 if allow and not forcemerge:
1430 if allow and not forcemerge:
1431 if modified or added or removed:
1431 if modified or added or removed:
1432 raise util.Abort(_("outstanding uncommited changes"))
1432 raise util.Abort(_("outstanding uncommited changes"))
1433 if not forcemerge and not force:
1433 if not forcemerge and not force:
1434 for f in unknown:
1434 for f in unknown:
1435 if f in m2:
1435 if f in m2:
1436 t1 = self.wread(f)
1436 t1 = self.wread(f)
1437 t2 = self.file(f).read(m2[f])
1437 t2 = self.file(f).read(m2[f])
1438 if cmp(t1, t2) != 0:
1438 if cmp(t1, t2) != 0:
1439 raise util.Abort(_("'%s' already exists in the working"
1439 raise util.Abort(_("'%s' already exists in the working"
1440 " dir and differs from remote") % f)
1440 " dir and differs from remote") % f)
1441
1441
1442 # resolve the manifest to determine which files
1442 # resolve the manifest to determine which files
1443 # we care about merging
1443 # we care about merging
1444 self.ui.note(_("resolving manifests\n"))
1444 self.ui.note(_("resolving manifests\n"))
1445 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1445 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1446 (force, allow, moddirstate, linear_path))
1446 (force, allow, moddirstate, linear_path))
1447 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1447 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1448 (short(man), short(m1n), short(m2n)))
1448 (short(man), short(m1n), short(m2n)))
1449
1449
1450 merge = {}
1450 merge = {}
1451 get = {}
1451 get = {}
1452 remove = []
1452 remove = []
1453
1453
1454 # construct a working dir manifest
1454 # construct a working dir manifest
1455 mw = m1.copy()
1455 mw = m1.copy()
1456 mfw = mf1.copy()
1456 mfw = mf1.copy()
1457 umap = dict.fromkeys(unknown)
1457 umap = dict.fromkeys(unknown)
1458
1458
1459 for f in added + modified + unknown:
1459 for f in added + modified + unknown:
1460 mw[f] = ""
1460 mw[f] = ""
1461 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1461 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1462
1462
1463 if moddirstate and not wlock:
1463 if moddirstate and not wlock:
1464 wlock = self.wlock()
1464 wlock = self.wlock()
1465
1465
1466 for f in deleted + removed:
1466 for f in deleted + removed:
1467 if f in mw:
1467 if f in mw:
1468 del mw[f]
1468 del mw[f]
1469
1469
1470 # If we're jumping between revisions (as opposed to merging),
1470 # If we're jumping between revisions (as opposed to merging),
1471 # and if neither the working directory nor the target rev has
1471 # and if neither the working directory nor the target rev has
1472 # the file, then we need to remove it from the dirstate, to
1472 # the file, then we need to remove it from the dirstate, to
1473 # prevent the dirstate from listing the file when it is no
1473 # prevent the dirstate from listing the file when it is no
1474 # longer in the manifest.
1474 # longer in the manifest.
1475 if moddirstate and linear_path and f not in m2:
1475 if moddirstate and linear_path and f not in m2:
1476 self.dirstate.forget((f,))
1476 self.dirstate.forget((f,))
1477
1477
1478 # Compare manifests
1478 # Compare manifests
1479 for f, n in mw.iteritems():
1479 for f, n in mw.iteritems():
1480 if choose and not choose(f):
1480 if choose and not choose(f):
1481 continue
1481 continue
1482 if f in m2:
1482 if f in m2:
1483 s = 0
1483 s = 0
1484
1484
1485 # is the wfile new since m1, and match m2?
1485 # is the wfile new since m1, and match m2?
1486 if f not in m1:
1486 if f not in m1:
1487 t1 = self.wread(f)
1487 t1 = self.wread(f)
1488 t2 = self.file(f).read(m2[f])
1488 t2 = self.file(f).read(m2[f])
1489 if cmp(t1, t2) == 0:
1489 if cmp(t1, t2) == 0:
1490 n = m2[f]
1490 n = m2[f]
1491 del t1, t2
1491 del t1, t2
1492
1492
1493 # are files different?
1493 # are files different?
1494 if n != m2[f]:
1494 if n != m2[f]:
1495 a = ma.get(f, nullid)
1495 a = ma.get(f, nullid)
1496 # are both different from the ancestor?
1496 # are both different from the ancestor?
1497 if n != a and m2[f] != a:
1497 if n != a and m2[f] != a:
1498 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1498 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1499 # merge executable bits
1499 # merge executable bits
1500 # "if we changed or they changed, change in merge"
1500 # "if we changed or they changed, change in merge"
1501 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1501 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1502 mode = ((a^b) | (a^c)) ^ a
1502 mode = ((a^b) | (a^c)) ^ a
1503 merge[f] = (m1.get(f, nullid), m2[f], mode)
1503 merge[f] = (m1.get(f, nullid), m2[f], mode)
1504 s = 1
1504 s = 1
1505 # are we clobbering?
1505 # are we clobbering?
1506 # is remote's version newer?
1506 # is remote's version newer?
1507 # or are we going back in time?
1507 # or are we going back in time?
1508 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1508 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1509 self.ui.debug(_(" remote %s is newer, get\n") % f)
1509 self.ui.debug(_(" remote %s is newer, get\n") % f)
1510 get[f] = m2[f]
1510 get[f] = m2[f]
1511 s = 1
1511 s = 1
1512 elif f in umap:
1512 elif f in umap:
1513 # this unknown file is the same as the checkout
1513 # this unknown file is the same as the checkout
1514 get[f] = m2[f]
1514 get[f] = m2[f]
1515
1515
1516 if not s and mfw[f] != mf2[f]:
1516 if not s and mfw[f] != mf2[f]:
1517 if force:
1517 if force:
1518 self.ui.debug(_(" updating permissions for %s\n") % f)
1518 self.ui.debug(_(" updating permissions for %s\n") % f)
1519 util.set_exec(self.wjoin(f), mf2[f])
1519 util.set_exec(self.wjoin(f), mf2[f])
1520 else:
1520 else:
1521 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1521 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1522 mode = ((a^b) | (a^c)) ^ a
1522 mode = ((a^b) | (a^c)) ^ a
1523 if mode != b:
1523 if mode != b:
1524 self.ui.debug(_(" updating permissions for %s\n")
1524 self.ui.debug(_(" updating permissions for %s\n")
1525 % f)
1525 % f)
1526 util.set_exec(self.wjoin(f), mode)
1526 util.set_exec(self.wjoin(f), mode)
1527 del m2[f]
1527 del m2[f]
1528 elif f in ma:
1528 elif f in ma:
1529 if n != ma[f]:
1529 if n != ma[f]:
1530 r = _("d")
1530 r = _("d")
1531 if not force and (linear_path or allow):
1531 if not force and (linear_path or allow):
1532 r = self.ui.prompt(
1532 r = self.ui.prompt(
1533 (_(" local changed %s which remote deleted\n") % f) +
1533 (_(" local changed %s which remote deleted\n") % f) +
1534 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1534 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1535 if r == _("d"):
1535 if r == _("d"):
1536 remove.append(f)
1536 remove.append(f)
1537 else:
1537 else:
1538 self.ui.debug(_("other deleted %s\n") % f)
1538 self.ui.debug(_("other deleted %s\n") % f)
1539 remove.append(f) # other deleted it
1539 remove.append(f) # other deleted it
1540 else:
1540 else:
1541 # file is created on branch or in working directory
1541 # file is created on branch or in working directory
1542 if force and f not in umap:
1542 if force and f not in umap:
1543 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1543 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1544 remove.append(f)
1544 remove.append(f)
1545 elif n == m1.get(f, nullid): # same as parent
1545 elif n == m1.get(f, nullid): # same as parent
1546 if p2 == pa: # going backwards?
1546 if p2 == pa: # going backwards?
1547 self.ui.debug(_("remote deleted %s\n") % f)
1547 self.ui.debug(_("remote deleted %s\n") % f)
1548 remove.append(f)
1548 remove.append(f)
1549 else:
1549 else:
1550 self.ui.debug(_("local modified %s, keeping\n") % f)
1550 self.ui.debug(_("local modified %s, keeping\n") % f)
1551 else:
1551 else:
1552 self.ui.debug(_("working dir created %s, keeping\n") % f)
1552 self.ui.debug(_("working dir created %s, keeping\n") % f)
1553
1553
1554 for f, n in m2.iteritems():
1554 for f, n in m2.iteritems():
1555 if choose and not choose(f):
1555 if choose and not choose(f):
1556 continue
1556 continue
1557 if f[0] == "/":
1557 if f[0] == "/":
1558 continue
1558 continue
1559 if f in ma and n != ma[f]:
1559 if f in ma and n != ma[f]:
1560 r = _("k")
1560 r = _("k")
1561 if not force and (linear_path or allow):
1561 if not force and (linear_path or allow):
1562 r = self.ui.prompt(
1562 r = self.ui.prompt(
1563 (_("remote changed %s which local deleted\n") % f) +
1563 (_("remote changed %s which local deleted\n") % f) +
1564 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1564 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1565 if r == _("k"):
1565 if r == _("k"):
1566 get[f] = n
1566 get[f] = n
1567 elif f not in ma:
1567 elif f not in ma:
1568 self.ui.debug(_("remote created %s\n") % f)
1568 self.ui.debug(_("remote created %s\n") % f)
1569 get[f] = n
1569 get[f] = n
1570 else:
1570 else:
1571 if force or p2 == pa: # going backwards?
1571 if force or p2 == pa: # going backwards?
1572 self.ui.debug(_("local deleted %s, recreating\n") % f)
1572 self.ui.debug(_("local deleted %s, recreating\n") % f)
1573 get[f] = n
1573 get[f] = n
1574 else:
1574 else:
1575 self.ui.debug(_("local deleted %s\n") % f)
1575 self.ui.debug(_("local deleted %s\n") % f)
1576
1576
1577 del mw, m1, m2, ma
1577 del mw, m1, m2, ma
1578
1578
1579 if force:
1579 if force:
1580 for f in merge:
1580 for f in merge:
1581 get[f] = merge[f][1]
1581 get[f] = merge[f][1]
1582 merge = {}
1582 merge = {}
1583
1583
1584 if linear_path or force:
1584 if linear_path or force:
1585 # we don't need to do any magic, just jump to the new rev
1585 # we don't need to do any magic, just jump to the new rev
1586 branch_merge = False
1586 branch_merge = False
1587 p1, p2 = p2, nullid
1587 p1, p2 = p2, nullid
1588 else:
1588 else:
1589 if not allow:
1589 if not allow:
1590 self.ui.status(_("this update spans a branch"
1590 self.ui.status(_("this update spans a branch"
1591 " affecting the following files:\n"))
1591 " affecting the following files:\n"))
1592 fl = merge.keys() + get.keys()
1592 fl = merge.keys() + get.keys()
1593 fl.sort()
1593 fl.sort()
1594 for f in fl:
1594 for f in fl:
1595 cf = ""
1595 cf = ""
1596 if f in merge:
1596 if f in merge:
1597 cf = _(" (resolve)")
1597 cf = _(" (resolve)")
1598 self.ui.status(" %s%s\n" % (f, cf))
1598 self.ui.status(" %s%s\n" % (f, cf))
1599 self.ui.warn(_("aborting update spanning branches!\n"))
1599 self.ui.warn(_("aborting update spanning branches!\n"))
1600 self.ui.status(_("(use update -m to merge across branches"
1600 self.ui.status(_("(use update -m to merge across branches"
1601 " or -C to lose changes)\n"))
1601 " or -C to lose changes)\n"))
1602 return 1
1602 return 1
1603 branch_merge = True
1603 branch_merge = True
1604
1604
1605 # get the files we don't need to change
1605 # get the files we don't need to change
1606 files = get.keys()
1606 files = get.keys()
1607 files.sort()
1607 files.sort()
1608 for f in files:
1608 for f in files:
1609 if f[0] == "/":
1609 if f[0] == "/":
1610 continue
1610 continue
1611 self.ui.note(_("getting %s\n") % f)
1611 self.ui.note(_("getting %s\n") % f)
1612 t = self.file(f).read(get[f])
1612 t = self.file(f).read(get[f])
1613 self.wwrite(f, t)
1613 self.wwrite(f, t)
1614 util.set_exec(self.wjoin(f), mf2[f])
1614 util.set_exec(self.wjoin(f), mf2[f])
1615 if moddirstate:
1615 if moddirstate:
1616 if branch_merge:
1616 if branch_merge:
1617 self.dirstate.update([f], 'n', st_mtime=-1)
1617 self.dirstate.update([f], 'n', st_mtime=-1)
1618 else:
1618 else:
1619 self.dirstate.update([f], 'n')
1619 self.dirstate.update([f], 'n')
1620
1620
1621 # merge the tricky bits
1621 # merge the tricky bits
1622 files = merge.keys()
1622 files = merge.keys()
1623 files.sort()
1623 files.sort()
1624 for f in files:
1624 for f in files:
1625 self.ui.status(_("merging %s\n") % f)
1625 self.ui.status(_("merging %s\n") % f)
1626 my, other, flag = merge[f]
1626 my, other, flag = merge[f]
1627 ret = self.merge3(f, my, other)
1627 ret = self.merge3(f, my, other)
1628 if ret:
1628 if ret:
1629 err = True
1629 err = True
1630 util.set_exec(self.wjoin(f), flag)
1630 util.set_exec(self.wjoin(f), flag)
1631 if moddirstate:
1631 if moddirstate:
1632 if branch_merge:
1632 if branch_merge:
1633 # We've done a branch merge, mark this file as merged
1633 # We've done a branch merge, mark this file as merged
1634 # so that we properly record the merger later
1634 # so that we properly record the merger later
1635 self.dirstate.update([f], 'm')
1635 self.dirstate.update([f], 'm')
1636 else:
1636 else:
1637 # We've update-merged a locally modified file, so
1637 # We've update-merged a locally modified file, so
1638 # we set the dirstate to emulate a normal checkout
1638 # we set the dirstate to emulate a normal checkout
1639 # of that file some time in the past. Thus our
1639 # of that file some time in the past. Thus our
1640 # merge will appear as a normal local file
1640 # merge will appear as a normal local file
1641 # modification.
1641 # modification.
1642 f_len = len(self.file(f).read(other))
1642 f_len = len(self.file(f).read(other))
1643 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1643 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1644
1644
1645 remove.sort()
1645 remove.sort()
1646 for f in remove:
1646 for f in remove:
1647 self.ui.note(_("removing %s\n") % f)
1647 self.ui.note(_("removing %s\n") % f)
1648 try:
1648 try:
1649 util.unlink(self.wjoin(f))
1649 util.unlink(self.wjoin(f))
1650 except OSError, inst:
1650 except OSError, inst:
1651 if inst.errno != errno.ENOENT:
1651 if inst.errno != errno.ENOENT:
1652 self.ui.warn(_("update failed to remove %s: %s!\n") %
1652 self.ui.warn(_("update failed to remove %s: %s!\n") %
1653 (f, inst.strerror))
1653 (f, inst.strerror))
1654 if moddirstate:
1654 if moddirstate:
1655 if branch_merge:
1655 if branch_merge:
1656 self.dirstate.update(remove, 'r')
1656 self.dirstate.update(remove, 'r')
1657 else:
1657 else:
1658 self.dirstate.forget(remove)
1658 self.dirstate.forget(remove)
1659
1659
1660 if moddirstate:
1660 if moddirstate:
1661 self.dirstate.setparents(p1, p2)
1661 self.dirstate.setparents(p1, p2)
1662 return err
1662 return err
1663
1663
1664 def merge3(self, fn, my, other):
1664 def merge3(self, fn, my, other):
1665 """perform a 3-way merge in the working directory"""
1665 """perform a 3-way merge in the working directory"""
1666
1666
1667 def temp(prefix, node):
1667 def temp(prefix, node):
1668 pre = "%s~%s." % (os.path.basename(fn), prefix)
1668 pre = "%s~%s." % (os.path.basename(fn), prefix)
1669 (fd, name) = tempfile.mkstemp("", pre)
1669 (fd, name) = tempfile.mkstemp("", pre)
1670 f = os.fdopen(fd, "wb")
1670 f = os.fdopen(fd, "wb")
1671 self.wwrite(fn, fl.read(node), f)
1671 self.wwrite(fn, fl.read(node), f)
1672 f.close()
1672 f.close()
1673 return name
1673 return name
1674
1674
1675 fl = self.file(fn)
1675 fl = self.file(fn)
1676 base = fl.ancestor(my, other)
1676 base = fl.ancestor(my, other)
1677 a = self.wjoin(fn)
1677 a = self.wjoin(fn)
1678 b = temp("base", base)
1678 b = temp("base", base)
1679 c = temp("other", other)
1679 c = temp("other", other)
1680
1680
1681 self.ui.note(_("resolving %s\n") % fn)
1681 self.ui.note(_("resolving %s\n") % fn)
1682 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1682 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1683 (fn, short(my), short(other), short(base)))
1683 (fn, short(my), short(other), short(base)))
1684
1684
1685 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1685 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1686 or "hgmerge")
1686 or "hgmerge")
1687 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1687 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1688 if r:
1688 if r:
1689 self.ui.warn(_("merging %s failed!\n") % fn)
1689 self.ui.warn(_("merging %s failed!\n") % fn)
1690
1690
1691 os.unlink(b)
1691 os.unlink(b)
1692 os.unlink(c)
1692 os.unlink(c)
1693 return r
1693 return r
1694
1694
1695 def verify(self):
1695 def verify(self):
1696 filelinkrevs = {}
1696 filelinkrevs = {}
1697 filenodes = {}
1697 filenodes = {}
1698 changesets = revisions = files = 0
1698 changesets = revisions = files = 0
1699 errors = [0]
1699 errors = [0]
1700 neededmanifests = {}
1700 neededmanifests = {}
1701
1701
1702 def err(msg):
1702 def err(msg):
1703 self.ui.warn(msg + "\n")
1703 self.ui.warn(msg + "\n")
1704 errors[0] += 1
1704 errors[0] += 1
1705
1705
1706 def checksize(obj, name):
1706 def checksize(obj, name):
1707 d = obj.checksize()
1707 d = obj.checksize()
1708 if d[0]:
1708 if d[0]:
1709 err(_("%s data length off by %d bytes") % (name, d[0]))
1709 err(_("%s data length off by %d bytes") % (name, d[0]))
1710 if d[1]:
1710 if d[1]:
1711 err(_("%s index contains %d extra bytes") % (name, d[1]))
1711 err(_("%s index contains %d extra bytes") % (name, d[1]))
1712
1712
1713 seen = {}
1713 seen = {}
1714 self.ui.status(_("checking changesets\n"))
1714 self.ui.status(_("checking changesets\n"))
1715 checksize(self.changelog, "changelog")
1715 checksize(self.changelog, "changelog")
1716
1716
1717 for i in range(self.changelog.count()):
1717 for i in range(self.changelog.count()):
1718 changesets += 1
1718 changesets += 1
1719 n = self.changelog.node(i)
1719 n = self.changelog.node(i)
1720 l = self.changelog.linkrev(n)
1720 l = self.changelog.linkrev(n)
1721 if l != i:
1721 if l != i:
1722 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1722 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1723 if n in seen:
1723 if n in seen:
1724 err(_("duplicate changeset at revision %d") % i)
1724 err(_("duplicate changeset at revision %d") % i)
1725 seen[n] = 1
1725 seen[n] = 1
1726
1726
1727 for p in self.changelog.parents(n):
1727 for p in self.changelog.parents(n):
1728 if p not in self.changelog.nodemap:
1728 if p not in self.changelog.nodemap:
1729 err(_("changeset %s has unknown parent %s") %
1729 err(_("changeset %s has unknown parent %s") %
1730 (short(n), short(p)))
1730 (short(n), short(p)))
1731 try:
1731 try:
1732 changes = self.changelog.read(n)
1732 changes = self.changelog.read(n)
1733 except KeyboardInterrupt:
1733 except KeyboardInterrupt:
1734 self.ui.warn(_("interrupted"))
1734 self.ui.warn(_("interrupted"))
1735 raise
1735 raise
1736 except Exception, inst:
1736 except Exception, inst:
1737 err(_("unpacking changeset %s: %s") % (short(n), inst))
1737 err(_("unpacking changeset %s: %s") % (short(n), inst))
1738
1738
1739 neededmanifests[changes[0]] = n
1739 neededmanifests[changes[0]] = n
1740
1740
1741 for f in changes[3]:
1741 for f in changes[3]:
1742 filelinkrevs.setdefault(f, []).append(i)
1742 filelinkrevs.setdefault(f, []).append(i)
1743
1743
1744 seen = {}
1744 seen = {}
1745 self.ui.status(_("checking manifests\n"))
1745 self.ui.status(_("checking manifests\n"))
1746 checksize(self.manifest, "manifest")
1746 checksize(self.manifest, "manifest")
1747
1747
1748 for i in range(self.manifest.count()):
1748 for i in range(self.manifest.count()):
1749 n = self.manifest.node(i)
1749 n = self.manifest.node(i)
1750 l = self.manifest.linkrev(n)
1750 l = self.manifest.linkrev(n)
1751
1751
1752 if l < 0 or l >= self.changelog.count():
1752 if l < 0 or l >= self.changelog.count():
1753 err(_("bad manifest link (%d) at revision %d") % (l, i))
1753 err(_("bad manifest link (%d) at revision %d") % (l, i))
1754
1754
1755 if n in neededmanifests:
1755 if n in neededmanifests:
1756 del neededmanifests[n]
1756 del neededmanifests[n]
1757
1757
1758 if n in seen:
1758 if n in seen:
1759 err(_("duplicate manifest at revision %d") % i)
1759 err(_("duplicate manifest at revision %d") % i)
1760
1760
1761 seen[n] = 1
1761 seen[n] = 1
1762
1762
1763 for p in self.manifest.parents(n):
1763 for p in self.manifest.parents(n):
1764 if p not in self.manifest.nodemap:
1764 if p not in self.manifest.nodemap:
1765 err(_("manifest %s has unknown parent %s") %
1765 err(_("manifest %s has unknown parent %s") %
1766 (short(n), short(p)))
1766 (short(n), short(p)))
1767
1767
1768 try:
1768 try:
1769 delta = mdiff.patchtext(self.manifest.delta(n))
1769 delta = mdiff.patchtext(self.manifest.delta(n))
1770 except KeyboardInterrupt:
1770 except KeyboardInterrupt:
1771 self.ui.warn(_("interrupted"))
1771 self.ui.warn(_("interrupted"))
1772 raise
1772 raise
1773 except Exception, inst:
1773 except Exception, inst:
1774 err(_("unpacking manifest %s: %s") % (short(n), inst))
1774 err(_("unpacking manifest %s: %s") % (short(n), inst))
1775
1775
1776 ff = [ l.split('\0') for l in delta.splitlines() ]
1776 ff = [ l.split('\0') for l in delta.splitlines() ]
1777 for f, fn in ff:
1777 for f, fn in ff:
1778 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1778 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1779
1779
1780 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1780 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1781
1781
1782 for m, c in neededmanifests.items():
1782 for m, c in neededmanifests.items():
1783 err(_("Changeset %s refers to unknown manifest %s") %
1783 err(_("Changeset %s refers to unknown manifest %s") %
1784 (short(m), short(c)))
1784 (short(m), short(c)))
1785 del neededmanifests
1785 del neededmanifests
1786
1786
1787 for f in filenodes:
1787 for f in filenodes:
1788 if f not in filelinkrevs:
1788 if f not in filelinkrevs:
1789 err(_("file %s in manifest but not in changesets") % f)
1789 err(_("file %s in manifest but not in changesets") % f)
1790
1790
1791 for f in filelinkrevs:
1791 for f in filelinkrevs:
1792 if f not in filenodes:
1792 if f not in filenodes:
1793 err(_("file %s in changeset but not in manifest") % f)
1793 err(_("file %s in changeset but not in manifest") % f)
1794
1794
1795 self.ui.status(_("checking files\n"))
1795 self.ui.status(_("checking files\n"))
1796 ff = filenodes.keys()
1796 ff = filenodes.keys()
1797 ff.sort()
1797 ff.sort()
1798 for f in ff:
1798 for f in ff:
1799 if f == "/dev/null":
1799 if f == "/dev/null":
1800 continue
1800 continue
1801 files += 1
1801 files += 1
1802 fl = self.file(f)
1802 fl = self.file(f)
1803 checksize(fl, f)
1803 checksize(fl, f)
1804
1804
1805 nodes = {nullid: 1}
1805 nodes = {nullid: 1}
1806 seen = {}
1806 seen = {}
1807 for i in range(fl.count()):
1807 for i in range(fl.count()):
1808 revisions += 1
1808 revisions += 1
1809 n = fl.node(i)
1809 n = fl.node(i)
1810
1810
1811 if n in seen:
1811 if n in seen:
1812 err(_("%s: duplicate revision %d") % (f, i))
1812 err(_("%s: duplicate revision %d") % (f, i))
1813 if n not in filenodes[f]:
1813 if n not in filenodes[f]:
1814 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1814 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1815 else:
1815 else:
1816 del filenodes[f][n]
1816 del filenodes[f][n]
1817
1817
1818 flr = fl.linkrev(n)
1818 flr = fl.linkrev(n)
1819 if flr not in filelinkrevs[f]:
1819 if flr not in filelinkrevs[f]:
1820 err(_("%s:%s points to unexpected changeset %d")
1820 err(_("%s:%s points to unexpected changeset %d")
1821 % (f, short(n), flr))
1821 % (f, short(n), flr))
1822 else:
1822 else:
1823 filelinkrevs[f].remove(flr)
1823 filelinkrevs[f].remove(flr)
1824
1824
1825 # verify contents
1825 # verify contents
1826 try:
1826 try:
1827 t = fl.read(n)
1827 t = fl.read(n)
1828 except KeyboardInterrupt:
1828 except KeyboardInterrupt:
1829 self.ui.warn(_("interrupted"))
1829 self.ui.warn(_("interrupted"))
1830 raise
1830 raise
1831 except Exception, inst:
1831 except Exception, inst:
1832 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1832 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1833
1833
1834 # verify parents
1834 # verify parents
1835 (p1, p2) = fl.parents(n)
1835 (p1, p2) = fl.parents(n)
1836 if p1 not in nodes:
1836 if p1 not in nodes:
1837 err(_("file %s:%s unknown parent 1 %s") %
1837 err(_("file %s:%s unknown parent 1 %s") %
1838 (f, short(n), short(p1)))
1838 (f, short(n), short(p1)))
1839 if p2 not in nodes:
1839 if p2 not in nodes:
1840 err(_("file %s:%s unknown parent 2 %s") %
1840 err(_("file %s:%s unknown parent 2 %s") %
1841 (f, short(n), short(p1)))
1841 (f, short(n), short(p1)))
1842 nodes[n] = 1
1842 nodes[n] = 1
1843
1843
1844 # cross-check
1844 # cross-check
1845 for node in filenodes[f]:
1845 for node in filenodes[f]:
1846 err(_("node %s in manifests not in %s") % (hex(node), f))
1846 err(_("node %s in manifests not in %s") % (hex(node), f))
1847
1847
1848 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1848 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1849 (files, changesets, revisions))
1849 (files, changesets, revisions))
1850
1850
1851 if errors[0]:
1851 if errors[0]:
1852 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1852 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1853 return 1
1853 return 1
General Comments 0
You need to be logged in to leave comments. Login now