##// END OF EJS Templates
Allow repo lock to be passed in to localrepo.commit for performance
mason@suse.com -
r1807:f1f43ea2 default
parent child Browse files
Show More
@@ -1,1896 +1,1897 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14
14
class localrepository(object):
    """Read/write access to a local Mercurial repository."""

    def __del__(self):
        # Drop the transaction handle so the repository object itself
        # does not keep a finished/pending transaction alive.
        self.transhandle = None
18 def __init__(self, ui, path=None, create=0):
18 def __init__(self, ui, path=None, create=0):
19 if not path:
19 if not path:
20 p = os.getcwd()
20 p = os.getcwd()
21 while not os.path.isdir(os.path.join(p, ".hg")):
21 while not os.path.isdir(os.path.join(p, ".hg")):
22 oldp = p
22 oldp = p
23 p = os.path.dirname(p)
23 p = os.path.dirname(p)
24 if p == oldp:
24 if p == oldp:
25 raise repo.RepoError(_("no repo found"))
25 raise repo.RepoError(_("no repo found"))
26 path = p
26 path = p
27 self.path = os.path.join(path, ".hg")
27 self.path = os.path.join(path, ".hg")
28
28
29 if not create and not os.path.isdir(self.path):
29 if not create and not os.path.isdir(self.path):
30 raise repo.RepoError(_("repository %s not found") % path)
30 raise repo.RepoError(_("repository %s not found") % path)
31
31
32 self.root = os.path.abspath(path)
32 self.root = os.path.abspath(path)
33 self.ui = ui
33 self.ui = ui
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.manifest = manifest.manifest(self.opener)
36 self.manifest = manifest.manifest(self.opener)
37 self.changelog = changelog.changelog(self.opener)
37 self.changelog = changelog.changelog(self.opener)
38 self.tagscache = None
38 self.tagscache = None
39 self.nodetagscache = None
39 self.nodetagscache = None
40 self.encodepats = None
40 self.encodepats = None
41 self.decodepats = None
41 self.decodepats = None
42 self.transhandle = None
42 self.transhandle = None
43
43
44 if create:
44 if create:
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(self.join("data"))
46 os.mkdir(self.join("data"))
47
47
48 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
48 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"))
50 self.ui.readconfig(self.join("hgrc"))
51 except IOError:
51 except IOError:
52 pass
52 pass
53
53
54 def hook(self, name, throw=False, **args):
54 def hook(self, name, throw=False, **args):
55 def runhook(name, cmd):
55 def runhook(name, cmd):
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 old = {}
57 old = {}
58 for k, v in args.items():
58 for k, v in args.items():
59 k = k.upper()
59 k = k.upper()
60 old['HG_' + k] = os.environ.get(k, None)
60 old['HG_' + k] = os.environ.get(k, None)
61 old[k] = os.environ.get(k, None)
61 old[k] = os.environ.get(k, None)
62 os.environ['HG_' + k] = str(v)
62 os.environ['HG_' + k] = str(v)
63 os.environ[k] = str(v)
63 os.environ[k] = str(v)
64
64
65 try:
65 try:
66 # Hooks run in the repository root
66 # Hooks run in the repository root
67 olddir = os.getcwd()
67 olddir = os.getcwd()
68 os.chdir(self.root)
68 os.chdir(self.root)
69 r = os.system(cmd)
69 r = os.system(cmd)
70 finally:
70 finally:
71 for k, v in old.items():
71 for k, v in old.items():
72 if v is not None:
72 if v is not None:
73 os.environ[k] = v
73 os.environ[k] = v
74 else:
74 else:
75 del os.environ[k]
75 del os.environ[k]
76
76
77 os.chdir(olddir)
77 os.chdir(olddir)
78
78
79 if r:
79 if r:
80 desc, r = util.explain_exit(r)
80 desc, r = util.explain_exit(r)
81 if throw:
81 if throw:
82 raise util.Abort(_('%s hook %s') % (name, desc))
82 raise util.Abort(_('%s hook %s') % (name, desc))
83 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
83 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
84 return False
84 return False
85 return True
85 return True
86
86
87 r = True
87 r = True
88 for hname, cmd in self.ui.configitems("hooks"):
88 for hname, cmd in self.ui.configitems("hooks"):
89 s = hname.split(".")
89 s = hname.split(".")
90 if s[0] == name and cmd:
90 if s[0] == name and cmd:
91 r = runhook(hname, cmd) and r
91 r = runhook(hname, cmd) and r
92 return r
92 return r
93
93
94 def tags(self):
94 def tags(self):
95 '''return a mapping of tag to node'''
95 '''return a mapping of tag to node'''
96 if not self.tagscache:
96 if not self.tagscache:
97 self.tagscache = {}
97 self.tagscache = {}
98 def addtag(self, k, n):
98 def addtag(self, k, n):
99 try:
99 try:
100 bin_n = bin(n)
100 bin_n = bin(n)
101 except TypeError:
101 except TypeError:
102 bin_n = ''
102 bin_n = ''
103 self.tagscache[k.strip()] = bin_n
103 self.tagscache[k.strip()] = bin_n
104
104
105 try:
105 try:
106 # read each head of the tags file, ending with the tip
106 # read each head of the tags file, ending with the tip
107 # and add each tag found to the map, with "newer" ones
107 # and add each tag found to the map, with "newer" ones
108 # taking precedence
108 # taking precedence
109 fl = self.file(".hgtags")
109 fl = self.file(".hgtags")
110 h = fl.heads()
110 h = fl.heads()
111 h.reverse()
111 h.reverse()
112 for r in h:
112 for r in h:
113 for l in fl.read(r).splitlines():
113 for l in fl.read(r).splitlines():
114 if l:
114 if l:
115 n, k = l.split(" ", 1)
115 n, k = l.split(" ", 1)
116 addtag(self, k, n)
116 addtag(self, k, n)
117 except KeyError:
117 except KeyError:
118 pass
118 pass
119
119
120 try:
120 try:
121 f = self.opener("localtags")
121 f = self.opener("localtags")
122 for l in f:
122 for l in f:
123 n, k = l.split(" ", 1)
123 n, k = l.split(" ", 1)
124 addtag(self, k, n)
124 addtag(self, k, n)
125 except IOError:
125 except IOError:
126 pass
126 pass
127
127
128 self.tagscache['tip'] = self.changelog.tip()
128 self.tagscache['tip'] = self.changelog.tip()
129
129
130 return self.tagscache
130 return self.tagscache
131
131
132 def tagslist(self):
132 def tagslist(self):
133 '''return a list of tags ordered by revision'''
133 '''return a list of tags ordered by revision'''
134 l = []
134 l = []
135 for t, n in self.tags().items():
135 for t, n in self.tags().items():
136 try:
136 try:
137 r = self.changelog.rev(n)
137 r = self.changelog.rev(n)
138 except:
138 except:
139 r = -2 # sort to the beginning of the list if unknown
139 r = -2 # sort to the beginning of the list if unknown
140 l.append((r, t, n))
140 l.append((r, t, n))
141 l.sort()
141 l.sort()
142 return [(t, n) for r, t, n in l]
142 return [(t, n) for r, t, n in l]
143
143
144 def nodetags(self, node):
144 def nodetags(self, node):
145 '''return the tags associated with a node'''
145 '''return the tags associated with a node'''
146 if not self.nodetagscache:
146 if not self.nodetagscache:
147 self.nodetagscache = {}
147 self.nodetagscache = {}
148 for t, n in self.tags().items():
148 for t, n in self.tags().items():
149 self.nodetagscache.setdefault(n, []).append(t)
149 self.nodetagscache.setdefault(n, []).append(t)
150 return self.nodetagscache.get(node, [])
150 return self.nodetagscache.get(node, [])
151
151
152 def lookup(self, key):
152 def lookup(self, key):
153 try:
153 try:
154 return self.tags()[key]
154 return self.tags()[key]
155 except KeyError:
155 except KeyError:
156 try:
156 try:
157 return self.changelog.lookup(key)
157 return self.changelog.lookup(key)
158 except:
158 except:
159 raise repo.RepoError(_("unknown revision '%s'") % key)
159 raise repo.RepoError(_("unknown revision '%s'") % key)
160
160
161 def dev(self):
161 def dev(self):
162 return os.stat(self.path).st_dev
162 return os.stat(self.path).st_dev
163
163
164 def local(self):
164 def local(self):
165 return True
165 return True
166
166
167 def join(self, f):
167 def join(self, f):
168 return os.path.join(self.path, f)
168 return os.path.join(self.path, f)
169
169
170 def wjoin(self, f):
170 def wjoin(self, f):
171 return os.path.join(self.root, f)
171 return os.path.join(self.root, f)
172
172
173 def file(self, f):
173 def file(self, f):
174 if f[0] == '/':
174 if f[0] == '/':
175 f = f[1:]
175 f = f[1:]
176 return filelog.filelog(self.opener, f)
176 return filelog.filelog(self.opener, f)
177
177
178 def getcwd(self):
178 def getcwd(self):
179 return self.dirstate.getcwd()
179 return self.dirstate.getcwd()
180
180
181 def wfile(self, f, mode='r'):
181 def wfile(self, f, mode='r'):
182 return self.wopener(f, mode)
182 return self.wopener(f, mode)
183
183
184 def wread(self, filename):
184 def wread(self, filename):
185 if self.encodepats == None:
185 if self.encodepats == None:
186 l = []
186 l = []
187 for pat, cmd in self.ui.configitems("encode"):
187 for pat, cmd in self.ui.configitems("encode"):
188 mf = util.matcher("", "/", [pat], [], [])[1]
188 mf = util.matcher("", "/", [pat], [], [])[1]
189 l.append((mf, cmd))
189 l.append((mf, cmd))
190 self.encodepats = l
190 self.encodepats = l
191
191
192 data = self.wopener(filename, 'r').read()
192 data = self.wopener(filename, 'r').read()
193
193
194 for mf, cmd in self.encodepats:
194 for mf, cmd in self.encodepats:
195 if mf(filename):
195 if mf(filename):
196 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
196 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
197 data = util.filter(data, cmd)
197 data = util.filter(data, cmd)
198 break
198 break
199
199
200 return data
200 return data
201
201
202 def wwrite(self, filename, data, fd=None):
202 def wwrite(self, filename, data, fd=None):
203 if self.decodepats == None:
203 if self.decodepats == None:
204 l = []
204 l = []
205 for pat, cmd in self.ui.configitems("decode"):
205 for pat, cmd in self.ui.configitems("decode"):
206 mf = util.matcher("", "/", [pat], [], [])[1]
206 mf = util.matcher("", "/", [pat], [], [])[1]
207 l.append((mf, cmd))
207 l.append((mf, cmd))
208 self.decodepats = l
208 self.decodepats = l
209
209
210 for mf, cmd in self.decodepats:
210 for mf, cmd in self.decodepats:
211 if mf(filename):
211 if mf(filename):
212 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
212 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
213 data = util.filter(data, cmd)
213 data = util.filter(data, cmd)
214 break
214 break
215
215
216 if fd:
216 if fd:
217 return fd.write(data)
217 return fd.write(data)
218 return self.wopener(filename, 'w').write(data)
218 return self.wopener(filename, 'w').write(data)
219
219
220 def transaction(self):
220 def transaction(self):
221 tr = self.transhandle
221 tr = self.transhandle
222 if tr != None and tr.running():
222 if tr != None and tr.running():
223 return tr.nest()
223 return tr.nest()
224
224
225 # save dirstate for undo
225 # save dirstate for undo
226 try:
226 try:
227 ds = self.opener("dirstate").read()
227 ds = self.opener("dirstate").read()
228 except IOError:
228 except IOError:
229 ds = ""
229 ds = ""
230 self.opener("journal.dirstate", "w").write(ds)
230 self.opener("journal.dirstate", "w").write(ds)
231
231
232 tr = transaction.transaction(self.ui.warn, self.opener,
232 tr = transaction.transaction(self.ui.warn, self.opener,
233 self.join("journal"),
233 self.join("journal"),
234 aftertrans(self.path))
234 aftertrans(self.path))
235 self.transhandle = tr
235 self.transhandle = tr
236 return tr
236 return tr
237
237
238 def recover(self):
238 def recover(self):
239 l = self.lock()
239 l = self.lock()
240 if os.path.exists(self.join("journal")):
240 if os.path.exists(self.join("journal")):
241 self.ui.status(_("rolling back interrupted transaction\n"))
241 self.ui.status(_("rolling back interrupted transaction\n"))
242 transaction.rollback(self.opener, self.join("journal"))
242 transaction.rollback(self.opener, self.join("journal"))
243 self.reload()
243 self.reload()
244 return True
244 return True
245 else:
245 else:
246 self.ui.warn(_("no interrupted transaction available\n"))
246 self.ui.warn(_("no interrupted transaction available\n"))
247 return False
247 return False
248
248
249 def undo(self, wlock=None):
249 def undo(self, wlock=None):
250 if not wlock:
250 if not wlock:
251 wlock = self.wlock()
251 wlock = self.wlock()
252 l = self.lock()
252 l = self.lock()
253 if os.path.exists(self.join("undo")):
253 if os.path.exists(self.join("undo")):
254 self.ui.status(_("rolling back last transaction\n"))
254 self.ui.status(_("rolling back last transaction\n"))
255 transaction.rollback(self.opener, self.join("undo"))
255 transaction.rollback(self.opener, self.join("undo"))
256 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
256 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
257 self.reload()
257 self.reload()
258 self.wreload()
258 self.wreload()
259 else:
259 else:
260 self.ui.warn(_("no undo information available\n"))
260 self.ui.warn(_("no undo information available\n"))
261
261
262 def wreload(self):
262 def wreload(self):
263 self.dirstate.read()
263 self.dirstate.read()
264
264
265 def reload(self):
265 def reload(self):
266 self.changelog.load()
266 self.changelog.load()
267 self.manifest.load()
267 self.manifest.load()
268 self.tagscache = None
268 self.tagscache = None
269 self.nodetagscache = None
269 self.nodetagscache = None
270
270
271 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
271 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
272 try:
272 try:
273 l = lock.lock(self.join(lockname), 0, releasefn)
273 l = lock.lock(self.join(lockname), 0, releasefn)
274 except lock.LockHeld, inst:
274 except lock.LockHeld, inst:
275 if not wait:
275 if not wait:
276 raise inst
276 raise inst
277 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
277 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
278 try:
278 try:
279 # default to 600 seconds timeout
279 # default to 600 seconds timeout
280 l = lock.lock(self.join(lockname),
280 l = lock.lock(self.join(lockname),
281 int(self.ui.config("ui", "timeout") or 600),
281 int(self.ui.config("ui", "timeout") or 600),
282 releasefn)
282 releasefn)
283 except lock.LockHeld, inst:
283 except lock.LockHeld, inst:
284 raise util.Abort(_("timeout while waiting for "
284 raise util.Abort(_("timeout while waiting for "
285 "lock held by %s") % inst.args[0])
285 "lock held by %s") % inst.args[0])
286 if acquirefn:
286 if acquirefn:
287 acquirefn()
287 acquirefn()
288 return l
288 return l
289
289
290 def lock(self, wait=1):
290 def lock(self, wait=1):
291 return self.do_lock("lock", wait, acquirefn=self.reload)
291 return self.do_lock("lock", wait, acquirefn=self.reload)
292
292
293 def wlock(self, wait=1):
293 def wlock(self, wait=1):
294 return self.do_lock("wlock", wait,
294 return self.do_lock("wlock", wait,
295 self.dirstate.write,
295 self.dirstate.write,
296 self.wreload)
296 self.wreload)
297
297
298 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
298 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
299 "determine whether a new filenode is needed"
299 "determine whether a new filenode is needed"
300 fp1 = manifest1.get(filename, nullid)
300 fp1 = manifest1.get(filename, nullid)
301 fp2 = manifest2.get(filename, nullid)
301 fp2 = manifest2.get(filename, nullid)
302
302
303 if fp2 != nullid:
303 if fp2 != nullid:
304 # is one parent an ancestor of the other?
304 # is one parent an ancestor of the other?
305 fpa = filelog.ancestor(fp1, fp2)
305 fpa = filelog.ancestor(fp1, fp2)
306 if fpa == fp1:
306 if fpa == fp1:
307 fp1, fp2 = fp2, nullid
307 fp1, fp2 = fp2, nullid
308 elif fpa == fp2:
308 elif fpa == fp2:
309 fp2 = nullid
309 fp2 = nullid
310
310
311 # is the file unmodified from the parent? report existing entry
311 # is the file unmodified from the parent? report existing entry
312 if fp2 == nullid and text == filelog.read(fp1):
312 if fp2 == nullid and text == filelog.read(fp1):
313 return (fp1, None, None)
313 return (fp1, None, None)
314
314
315 return (None, fp1, fp2)
315 return (None, fp1, fp2)
316
316
317 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
317 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
318 orig_parent = self.dirstate.parents()[0] or nullid
318 orig_parent = self.dirstate.parents()[0] or nullid
319 p1 = p1 or self.dirstate.parents()[0] or nullid
319 p1 = p1 or self.dirstate.parents()[0] or nullid
320 p2 = p2 or self.dirstate.parents()[1] or nullid
320 p2 = p2 or self.dirstate.parents()[1] or nullid
321 c1 = self.changelog.read(p1)
321 c1 = self.changelog.read(p1)
322 c2 = self.changelog.read(p2)
322 c2 = self.changelog.read(p2)
323 m1 = self.manifest.read(c1[0])
323 m1 = self.manifest.read(c1[0])
324 mf1 = self.manifest.readflags(c1[0])
324 mf1 = self.manifest.readflags(c1[0])
325 m2 = self.manifest.read(c2[0])
325 m2 = self.manifest.read(c2[0])
326 changed = []
326 changed = []
327
327
328 if orig_parent == p1:
328 if orig_parent == p1:
329 update_dirstate = 1
329 update_dirstate = 1
330 else:
330 else:
331 update_dirstate = 0
331 update_dirstate = 0
332
332
333 if not wlock:
333 if not wlock:
334 wlock = self.wlock()
334 wlock = self.wlock()
335 l = self.lock()
335 l = self.lock()
336 tr = self.transaction()
336 tr = self.transaction()
337 mm = m1.copy()
337 mm = m1.copy()
338 mfm = mf1.copy()
338 mfm = mf1.copy()
339 linkrev = self.changelog.count()
339 linkrev = self.changelog.count()
340 for f in files:
340 for f in files:
341 try:
341 try:
342 t = self.wread(f)
342 t = self.wread(f)
343 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
343 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
344 r = self.file(f)
344 r = self.file(f)
345 mfm[f] = tm
345 mfm[f] = tm
346
346
347 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
347 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
348 if entry:
348 if entry:
349 mm[f] = entry
349 mm[f] = entry
350 continue
350 continue
351
351
352 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
352 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
353 changed.append(f)
353 changed.append(f)
354 if update_dirstate:
354 if update_dirstate:
355 self.dirstate.update([f], "n")
355 self.dirstate.update([f], "n")
356 except IOError:
356 except IOError:
357 try:
357 try:
358 del mm[f]
358 del mm[f]
359 del mfm[f]
359 del mfm[f]
360 if update_dirstate:
360 if update_dirstate:
361 self.dirstate.forget([f])
361 self.dirstate.forget([f])
362 except:
362 except:
363 # deleted from p2?
363 # deleted from p2?
364 pass
364 pass
365
365
366 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
366 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
367 user = user or self.ui.username()
367 user = user or self.ui.username()
368 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
368 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
369 tr.close()
369 tr.close()
370 if update_dirstate:
370 if update_dirstate:
371 self.dirstate.setparents(n, nullid)
371 self.dirstate.setparents(n, nullid)
372
372
373 def commit(self, files=None, text="", user=None, date=None,
373 def commit(self, files=None, text="", user=None, date=None,
374 match=util.always, force=False, wlock=None):
374 match=util.always, force=False, lock=None, wlock=None):
375 commit = []
375 commit = []
376 remove = []
376 remove = []
377 changed = []
377 changed = []
378
378
379 if files:
379 if files:
380 for f in files:
380 for f in files:
381 s = self.dirstate.state(f)
381 s = self.dirstate.state(f)
382 if s in 'nmai':
382 if s in 'nmai':
383 commit.append(f)
383 commit.append(f)
384 elif s == 'r':
384 elif s == 'r':
385 remove.append(f)
385 remove.append(f)
386 else:
386 else:
387 self.ui.warn(_("%s not tracked!\n") % f)
387 self.ui.warn(_("%s not tracked!\n") % f)
388 else:
388 else:
389 modified, added, removed, deleted, unknown = self.changes(match=match)
389 modified, added, removed, deleted, unknown = self.changes(match=match)
390 commit = modified + added
390 commit = modified + added
391 remove = removed
391 remove = removed
392
392
393 p1, p2 = self.dirstate.parents()
393 p1, p2 = self.dirstate.parents()
394 c1 = self.changelog.read(p1)
394 c1 = self.changelog.read(p1)
395 c2 = self.changelog.read(p2)
395 c2 = self.changelog.read(p2)
396 m1 = self.manifest.read(c1[0])
396 m1 = self.manifest.read(c1[0])
397 mf1 = self.manifest.readflags(c1[0])
397 mf1 = self.manifest.readflags(c1[0])
398 m2 = self.manifest.read(c2[0])
398 m2 = self.manifest.read(c2[0])
399
399
400 if not commit and not remove and not force and p2 == nullid:
400 if not commit and not remove and not force and p2 == nullid:
401 self.ui.status(_("nothing changed\n"))
401 self.ui.status(_("nothing changed\n"))
402 return None
402 return None
403
403
404 xp1 = hex(p1)
404 xp1 = hex(p1)
405 if p2 == nullid: xp2 = ''
405 if p2 == nullid: xp2 = ''
406 else: xp2 = hex(p2)
406 else: xp2 = hex(p2)
407
407
408 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
408 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
409
409
410 if not wlock:
410 if not wlock:
411 wlock = self.wlock()
411 wlock = self.wlock()
412 l = self.lock()
412 if not lock:
413 lock = self.lock()
413 tr = self.transaction()
414 tr = self.transaction()
414
415
415 # check in files
416 # check in files
416 new = {}
417 new = {}
417 linkrev = self.changelog.count()
418 linkrev = self.changelog.count()
418 commit.sort()
419 commit.sort()
419 for f in commit:
420 for f in commit:
420 self.ui.note(f + "\n")
421 self.ui.note(f + "\n")
421 try:
422 try:
422 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
423 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
423 t = self.wread(f)
424 t = self.wread(f)
424 except IOError:
425 except IOError:
425 self.ui.warn(_("trouble committing %s!\n") % f)
426 self.ui.warn(_("trouble committing %s!\n") % f)
426 raise
427 raise
427
428
428 r = self.file(f)
429 r = self.file(f)
429
430
430 meta = {}
431 meta = {}
431 cp = self.dirstate.copied(f)
432 cp = self.dirstate.copied(f)
432 if cp:
433 if cp:
433 meta["copy"] = cp
434 meta["copy"] = cp
434 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
435 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
435 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
436 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
436 fp1, fp2 = nullid, nullid
437 fp1, fp2 = nullid, nullid
437 else:
438 else:
438 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
439 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
439 if entry:
440 if entry:
440 new[f] = entry
441 new[f] = entry
441 continue
442 continue
442
443
443 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
444 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
444 # remember what we've added so that we can later calculate
445 # remember what we've added so that we can later calculate
445 # the files to pull from a set of changesets
446 # the files to pull from a set of changesets
446 changed.append(f)
447 changed.append(f)
447
448
448 # update manifest
449 # update manifest
449 m1 = m1.copy()
450 m1 = m1.copy()
450 m1.update(new)
451 m1.update(new)
451 for f in remove:
452 for f in remove:
452 if f in m1:
453 if f in m1:
453 del m1[f]
454 del m1[f]
454 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
455 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
455 (new, remove))
456 (new, remove))
456
457
457 # add changeset
458 # add changeset
458 new = new.keys()
459 new = new.keys()
459 new.sort()
460 new.sort()
460
461
461 if not text:
462 if not text:
462 edittext = [""]
463 edittext = [""]
463 if p2 != nullid:
464 if p2 != nullid:
464 edittext.append("HG: branch merge")
465 edittext.append("HG: branch merge")
465 edittext.extend(["HG: changed %s" % f for f in changed])
466 edittext.extend(["HG: changed %s" % f for f in changed])
466 edittext.extend(["HG: removed %s" % f for f in remove])
467 edittext.extend(["HG: removed %s" % f for f in remove])
467 if not changed and not remove:
468 if not changed and not remove:
468 edittext.append("HG: no files changed")
469 edittext.append("HG: no files changed")
469 edittext.append("")
470 edittext.append("")
470 # run editor in the repository root
471 # run editor in the repository root
471 olddir = os.getcwd()
472 olddir = os.getcwd()
472 os.chdir(self.root)
473 os.chdir(self.root)
473 edittext = self.ui.edit("\n".join(edittext))
474 edittext = self.ui.edit("\n".join(edittext))
474 os.chdir(olddir)
475 os.chdir(olddir)
475 if not edittext.rstrip():
476 if not edittext.rstrip():
476 return None
477 return None
477 text = edittext
478 text = edittext
478
479
479 user = user or self.ui.username()
480 user = user or self.ui.username()
480 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
481 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
481 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
482 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
482 parent2=xp2)
483 parent2=xp2)
483 tr.close()
484 tr.close()
484
485
485 self.dirstate.setparents(n)
486 self.dirstate.setparents(n)
486 self.dirstate.update(new, "n")
487 self.dirstate.update(new, "n")
487 self.dirstate.forget(remove)
488 self.dirstate.forget(remove)
488
489
489 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
490 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
490 return n
491 return n
491
492
492 def walk(self, node=None, files=[], match=util.always):
493 def walk(self, node=None, files=[], match=util.always):
493 if node:
494 if node:
494 fdict = dict.fromkeys(files)
495 fdict = dict.fromkeys(files)
495 for fn in self.manifest.read(self.changelog.read(node)[0]):
496 for fn in self.manifest.read(self.changelog.read(node)[0]):
496 fdict.pop(fn, None)
497 fdict.pop(fn, None)
497 if match(fn):
498 if match(fn):
498 yield 'm', fn
499 yield 'm', fn
499 for fn in fdict:
500 for fn in fdict:
500 self.ui.warn(_('%s: No such file in rev %s\n') % (
501 self.ui.warn(_('%s: No such file in rev %s\n') % (
501 util.pathto(self.getcwd(), fn), short(node)))
502 util.pathto(self.getcwd(), fn), short(node)))
502 else:
503 else:
503 for src, fn in self.dirstate.walk(files, match):
504 for src, fn in self.dirstate.walk(files, match):
504 yield src, fn
505 yield src, fn
505
506
506 def changes(self, node1=None, node2=None, files=[], match=util.always,
507 def changes(self, node1=None, node2=None, files=[], match=util.always,
507 wlock=None):
508 wlock=None):
508 """return changes between two nodes or node and working directory
509 """return changes between two nodes or node and working directory
509
510
510 If node1 is None, use the first dirstate parent instead.
511 If node1 is None, use the first dirstate parent instead.
511 If node2 is None, compare node1 with working directory.
512 If node2 is None, compare node1 with working directory.
512 """
513 """
513
514
514 def fcmp(fn, mf):
515 def fcmp(fn, mf):
515 t1 = self.wread(fn)
516 t1 = self.wread(fn)
516 t2 = self.file(fn).read(mf.get(fn, nullid))
517 t2 = self.file(fn).read(mf.get(fn, nullid))
517 return cmp(t1, t2)
518 return cmp(t1, t2)
518
519
519 def mfmatches(node):
520 def mfmatches(node):
520 change = self.changelog.read(node)
521 change = self.changelog.read(node)
521 mf = dict(self.manifest.read(change[0]))
522 mf = dict(self.manifest.read(change[0]))
522 for fn in mf.keys():
523 for fn in mf.keys():
523 if not match(fn):
524 if not match(fn):
524 del mf[fn]
525 del mf[fn]
525 return mf
526 return mf
526
527
527 if node1:
528 if node1:
528 # read the manifest from node1 before the manifest from node2,
529 # read the manifest from node1 before the manifest from node2,
529 # so that we'll hit the manifest cache if we're going through
530 # so that we'll hit the manifest cache if we're going through
530 # all the revisions in parent->child order.
531 # all the revisions in parent->child order.
531 mf1 = mfmatches(node1)
532 mf1 = mfmatches(node1)
532
533
533 # are we comparing the working directory?
534 # are we comparing the working directory?
534 if not node2:
535 if not node2:
535 if not wlock:
536 if not wlock:
536 try:
537 try:
537 wlock = self.wlock(wait=0)
538 wlock = self.wlock(wait=0)
538 except lock.LockException:
539 except lock.LockException:
539 wlock = None
540 wlock = None
540 lookup, modified, added, removed, deleted, unknown = (
541 lookup, modified, added, removed, deleted, unknown = (
541 self.dirstate.changes(files, match))
542 self.dirstate.changes(files, match))
542
543
543 # are we comparing working dir against its parent?
544 # are we comparing working dir against its parent?
544 if not node1:
545 if not node1:
545 if lookup:
546 if lookup:
546 # do a full compare of any files that might have changed
547 # do a full compare of any files that might have changed
547 mf2 = mfmatches(self.dirstate.parents()[0])
548 mf2 = mfmatches(self.dirstate.parents()[0])
548 for f in lookup:
549 for f in lookup:
549 if fcmp(f, mf2):
550 if fcmp(f, mf2):
550 modified.append(f)
551 modified.append(f)
551 elif wlock is not None:
552 elif wlock is not None:
552 self.dirstate.update([f], "n")
553 self.dirstate.update([f], "n")
553 else:
554 else:
554 # we are comparing working dir against non-parent
555 # we are comparing working dir against non-parent
555 # generate a pseudo-manifest for the working dir
556 # generate a pseudo-manifest for the working dir
556 mf2 = mfmatches(self.dirstate.parents()[0])
557 mf2 = mfmatches(self.dirstate.parents()[0])
557 for f in lookup + modified + added:
558 for f in lookup + modified + added:
558 mf2[f] = ""
559 mf2[f] = ""
559 for f in removed:
560 for f in removed:
560 if f in mf2:
561 if f in mf2:
561 del mf2[f]
562 del mf2[f]
562 else:
563 else:
563 # we are comparing two revisions
564 # we are comparing two revisions
564 deleted, unknown = [], []
565 deleted, unknown = [], []
565 mf2 = mfmatches(node2)
566 mf2 = mfmatches(node2)
566
567
567 if node1:
568 if node1:
568 # flush lists from dirstate before comparing manifests
569 # flush lists from dirstate before comparing manifests
569 modified, added = [], []
570 modified, added = [], []
570
571
571 for fn in mf2:
572 for fn in mf2:
572 if mf1.has_key(fn):
573 if mf1.has_key(fn):
573 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
574 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
574 modified.append(fn)
575 modified.append(fn)
575 del mf1[fn]
576 del mf1[fn]
576 else:
577 else:
577 added.append(fn)
578 added.append(fn)
578
579
579 removed = mf1.keys()
580 removed = mf1.keys()
580
581
581 # sort and return results:
582 # sort and return results:
582 for l in modified, added, removed, deleted, unknown:
583 for l in modified, added, removed, deleted, unknown:
583 l.sort()
584 l.sort()
584 return (modified, added, removed, deleted, unknown)
585 return (modified, added, removed, deleted, unknown)
585
586
586 def add(self, list, wlock=None):
587 def add(self, list, wlock=None):
587 if not wlock:
588 if not wlock:
588 wlock = self.wlock()
589 wlock = self.wlock()
589 for f in list:
590 for f in list:
590 p = self.wjoin(f)
591 p = self.wjoin(f)
591 if not os.path.exists(p):
592 if not os.path.exists(p):
592 self.ui.warn(_("%s does not exist!\n") % f)
593 self.ui.warn(_("%s does not exist!\n") % f)
593 elif not os.path.isfile(p):
594 elif not os.path.isfile(p):
594 self.ui.warn(_("%s not added: only files supported currently\n")
595 self.ui.warn(_("%s not added: only files supported currently\n")
595 % f)
596 % f)
596 elif self.dirstate.state(f) in 'an':
597 elif self.dirstate.state(f) in 'an':
597 self.ui.warn(_("%s already tracked!\n") % f)
598 self.ui.warn(_("%s already tracked!\n") % f)
598 else:
599 else:
599 self.dirstate.update([f], "a")
600 self.dirstate.update([f], "a")
600
601
601 def forget(self, list, wlock=None):
602 def forget(self, list, wlock=None):
602 if not wlock:
603 if not wlock:
603 wlock = self.wlock()
604 wlock = self.wlock()
604 for f in list:
605 for f in list:
605 if self.dirstate.state(f) not in 'ai':
606 if self.dirstate.state(f) not in 'ai':
606 self.ui.warn(_("%s not added!\n") % f)
607 self.ui.warn(_("%s not added!\n") % f)
607 else:
608 else:
608 self.dirstate.forget([f])
609 self.dirstate.forget([f])
609
610
610 def remove(self, list, unlink=False, wlock=None):
611 def remove(self, list, unlink=False, wlock=None):
611 if unlink:
612 if unlink:
612 for f in list:
613 for f in list:
613 try:
614 try:
614 util.unlink(self.wjoin(f))
615 util.unlink(self.wjoin(f))
615 except OSError, inst:
616 except OSError, inst:
616 if inst.errno != errno.ENOENT:
617 if inst.errno != errno.ENOENT:
617 raise
618 raise
618 if not wlock:
619 if not wlock:
619 wlock = self.wlock()
620 wlock = self.wlock()
620 for f in list:
621 for f in list:
621 p = self.wjoin(f)
622 p = self.wjoin(f)
622 if os.path.exists(p):
623 if os.path.exists(p):
623 self.ui.warn(_("%s still exists!\n") % f)
624 self.ui.warn(_("%s still exists!\n") % f)
624 elif self.dirstate.state(f) == 'a':
625 elif self.dirstate.state(f) == 'a':
625 self.dirstate.forget([f])
626 self.dirstate.forget([f])
626 elif f not in self.dirstate:
627 elif f not in self.dirstate:
627 self.ui.warn(_("%s not tracked!\n") % f)
628 self.ui.warn(_("%s not tracked!\n") % f)
628 else:
629 else:
629 self.dirstate.update([f], "r")
630 self.dirstate.update([f], "r")
630
631
631 def undelete(self, list, wlock=None):
632 def undelete(self, list, wlock=None):
632 p = self.dirstate.parents()[0]
633 p = self.dirstate.parents()[0]
633 mn = self.changelog.read(p)[0]
634 mn = self.changelog.read(p)[0]
634 mf = self.manifest.readflags(mn)
635 mf = self.manifest.readflags(mn)
635 m = self.manifest.read(mn)
636 m = self.manifest.read(mn)
636 if not wlock:
637 if not wlock:
637 wlock = self.wlock()
638 wlock = self.wlock()
638 for f in list:
639 for f in list:
639 if self.dirstate.state(f) not in "r":
640 if self.dirstate.state(f) not in "r":
640 self.ui.warn("%s not removed!\n" % f)
641 self.ui.warn("%s not removed!\n" % f)
641 else:
642 else:
642 t = self.file(f).read(m[f])
643 t = self.file(f).read(m[f])
643 self.wwrite(f, t)
644 self.wwrite(f, t)
644 util.set_exec(self.wjoin(f), mf[f])
645 util.set_exec(self.wjoin(f), mf[f])
645 self.dirstate.update([f], "n")
646 self.dirstate.update([f], "n")
646
647
647 def copy(self, source, dest, wlock=None):
648 def copy(self, source, dest, wlock=None):
648 p = self.wjoin(dest)
649 p = self.wjoin(dest)
649 if not os.path.exists(p):
650 if not os.path.exists(p):
650 self.ui.warn(_("%s does not exist!\n") % dest)
651 self.ui.warn(_("%s does not exist!\n") % dest)
651 elif not os.path.isfile(p):
652 elif not os.path.isfile(p):
652 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
653 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
653 else:
654 else:
654 if not wlock:
655 if not wlock:
655 wlock = self.wlock()
656 wlock = self.wlock()
656 if self.dirstate.state(dest) == '?':
657 if self.dirstate.state(dest) == '?':
657 self.dirstate.update([dest], "a")
658 self.dirstate.update([dest], "a")
658 self.dirstate.copy(source, dest)
659 self.dirstate.copy(source, dest)
659
660
660 def heads(self, start=None):
661 def heads(self, start=None):
661 heads = self.changelog.heads(start)
662 heads = self.changelog.heads(start)
662 # sort the output in rev descending order
663 # sort the output in rev descending order
663 heads = [(-self.changelog.rev(h), h) for h in heads]
664 heads = [(-self.changelog.rev(h), h) for h in heads]
664 heads.sort()
665 heads.sort()
665 return [n for (r, n) in heads]
666 return [n for (r, n) in heads]
666
667
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head node to a list of branch tags
        visible from it (see the comment block above for the precise
        elimination rules).  heads defaults to self.heads()."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # limiting branch given: stop descending past it
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue the second parent of a merge for a later pass,
                # remembering the tags found so far on this path
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set (as a dict) of all branch-tag nodes reachable
                # from node, memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
772
773
773 def branches(self, nodes):
774 def branches(self, nodes):
774 if not nodes:
775 if not nodes:
775 nodes = [self.changelog.tip()]
776 nodes = [self.changelog.tip()]
776 b = []
777 b = []
777 for n in nodes:
778 for n in nodes:
778 t = n
779 t = n
779 while n:
780 while n:
780 p = self.changelog.parents(n)
781 p = self.changelog.parents(n)
781 if p[1] != nullid or p[0] == nullid:
782 if p[1] != nullid or p[0] == nullid:
782 b.append((t, n, p[0], p[1]))
783 b.append((t, n, p[0], p[1]))
783 break
784 break
784 n = p[0]
785 n = p[0]
785 return b
786 return b
786
787
787 def between(self, pairs):
788 def between(self, pairs):
788 r = []
789 r = []
789
790
790 for top, bottom in pairs:
791 for top, bottom in pairs:
791 n, l, i = top, [], 0
792 n, l, i = top, [], 0
792 f = 1
793 f = 1
793
794
794 while n != bottom:
795 while n != bottom:
795 p = self.changelog.parents(n)[0]
796 p = self.changelog.parents(n)[0]
796 if i == f:
797 if i == f:
797 l.append(n)
798 l.append(n)
798 f = f * 2
799 f = f * 2
799 n = p
800 n = p
800 i += 1
801 i += 1
801
802
802 r.append(l)
803 r.append(l)
803
804
804 return r
805 return r
805
806
    def findincoming(self, remote, base=None, heads=None):
        """Discover which changesets the remote has that we lack.

        Returns a list of the earliest-unknown changeset nodes to
        fetch, or None if the remote has nothing new.  As a side
        effect, fills the (optional, caller-supplied) dict *base* with
        the latest common nodes.  heads defaults to remote.heads().
        Raises repo.RepoError if discovery claims we need a changeset
        we already have.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        # split the remote heads into those we already know about
        # (common: record in base) and those we do not (unknown)
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unseen parents for the next batched
                    # remote.branches request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch parent lookups ten at a time to bound request size
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
927
928
928 def findoutgoing(self, remote, base=None, heads=None):
929 def findoutgoing(self, remote, base=None, heads=None):
929 if base == None:
930 if base == None:
930 base = {}
931 base = {}
931 self.findincoming(remote, base, heads)
932 self.findincoming(remote, base, heads)
932
933
933 self.ui.debug(_("common changesets up to ")
934 self.ui.debug(_("common changesets up to ")
934 + " ".join(map(short, base.keys())) + "\n")
935 + " ".join(map(short, base.keys())) + "\n")
935
936
936 remain = dict.fromkeys(self.changelog.nodemap)
937 remain = dict.fromkeys(self.changelog.nodemap)
937
938
938 # prune everything remote has from the tree
939 # prune everything remote has from the tree
939 del remain[nullid]
940 del remain[nullid]
940 remove = base.keys()
941 remove = base.keys()
941 while remove:
942 while remove:
942 n = remove.pop(0)
943 n = remove.pop(0)
943 if n in remain:
944 if n in remain:
944 del remain[n]
945 del remain[n]
945 for p in self.changelog.parents(n):
946 for p in self.changelog.parents(n):
946 remove.append(p)
947 remove.append(p)
947
948
948 # find every node whose parents have been pruned
949 # find every node whose parents have been pruned
949 subset = []
950 subset = []
950 for n in remain:
951 for n in remain:
951 p1, p2 = self.changelog.parents(n)
952 p1, p2 = self.changelog.parents(n)
952 if p1 not in remain and p2 not in remain:
953 if p1 not in remain and p2 not in remain:
953 subset.append(n)
954 subset.append(n)
954
955
955 # this is the set of all roots we have to push
956 # this is the set of all roots we have to push
956 return subset
957 return subset
957
958
958 def pull(self, remote, heads=None):
959 def pull(self, remote, heads=None):
959 l = self.lock()
960 l = self.lock()
960
961
961 # if we have an empty repo, fetch everything
962 # if we have an empty repo, fetch everything
962 if self.changelog.tip() == nullid:
963 if self.changelog.tip() == nullid:
963 self.ui.status(_("requesting all changes\n"))
964 self.ui.status(_("requesting all changes\n"))
964 fetch = [nullid]
965 fetch = [nullid]
965 else:
966 else:
966 fetch = self.findincoming(remote)
967 fetch = self.findincoming(remote)
967
968
968 if not fetch:
969 if not fetch:
969 self.ui.status(_("no changes found\n"))
970 self.ui.status(_("no changes found\n"))
970 return 1
971 return 1
971
972
972 if heads is None:
973 if heads is None:
973 cg = remote.changegroup(fetch, 'pull')
974 cg = remote.changegroup(fetch, 'pull')
974 else:
975 else:
975 cg = remote.changegroupsubset(fetch, heads, 'pull')
976 cg = remote.changegroupsubset(fetch, heads, 'pull')
976 return self.addchangegroup(cg)
977 return self.addchangegroup(cg)
977
978
978 def push(self, remote, force=False, revs=None):
979 def push(self, remote, force=False, revs=None):
979 lock = remote.lock()
980 lock = remote.lock()
980
981
981 base = {}
982 base = {}
982 heads = remote.heads()
983 heads = remote.heads()
983 inc = self.findincoming(remote, base, heads)
984 inc = self.findincoming(remote, base, heads)
984 if not force and inc:
985 if not force and inc:
985 self.ui.warn(_("abort: unsynced remote changes!\n"))
986 self.ui.warn(_("abort: unsynced remote changes!\n"))
986 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
987 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
987 return 1
988 return 1
988
989
989 update = self.findoutgoing(remote, base)
990 update = self.findoutgoing(remote, base)
990 if revs is not None:
991 if revs is not None:
991 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
992 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
992 else:
993 else:
993 bases, heads = update, self.changelog.heads()
994 bases, heads = update, self.changelog.heads()
994
995
995 if not bases:
996 if not bases:
996 self.ui.status(_("no changes found\n"))
997 self.ui.status(_("no changes found\n"))
997 return 1
998 return 1
998 elif not force:
999 elif not force:
999 if len(bases) < len(heads):
1000 if len(bases) < len(heads):
1000 self.ui.warn(_("abort: push creates new remote branches!\n"))
1001 self.ui.warn(_("abort: push creates new remote branches!\n"))
1001 self.ui.status(_("(did you forget to merge?"
1002 self.ui.status(_("(did you forget to merge?"
1002 " use push -f to force)\n"))
1003 " use push -f to force)\n"))
1003 return 1
1004 return 1
1004
1005
1005 if revs is None:
1006 if revs is None:
1006 cg = self.changegroup(update, 'push')
1007 cg = self.changegroup(update, 'push')
1007 else:
1008 else:
1008 cg = self.changegroupsubset(update, revs, 'push')
1009 cg = self.changegroupsubset(update, revs, 'push')
1009 return remote.addchangegroup(cg)
1010 return remote.addchangegroup(cg)
1010
1011
1011 def changegroupsubset(self, bases, heads, source):
1012 def changegroupsubset(self, bases, heads, source):
1012 """This function generates a changegroup consisting of all the nodes
1013 """This function generates a changegroup consisting of all the nodes
1013 that are descendents of any of the bases, and ancestors of any of
1014 that are descendents of any of the bases, and ancestors of any of
1014 the heads.
1015 the heads.
1015
1016
1016 It is fairly complex as determining which filenodes and which
1017 It is fairly complex as determining which filenodes and which
1017 manifest nodes need to be included for the changeset to be complete
1018 manifest nodes need to be included for the changeset to be complete
1018 is non-trivial.
1019 is non-trivial.
1019
1020
1020 Another wrinkle is doing the reverse, figuring out which changeset in
1021 Another wrinkle is doing the reverse, figuring out which changeset in
1021 the changegroup a particular filenode or manifestnode belongs to."""
1022 the changegroup a particular filenode or manifestnode belongs to."""
1022
1023
1023 self.hook('preoutgoing', throw=True, source=source)
1024 self.hook('preoutgoing', throw=True, source=source)
1024
1025
1025 # Set up some initial variables
1026 # Set up some initial variables
1026 # Make it easy to refer to self.changelog
1027 # Make it easy to refer to self.changelog
1027 cl = self.changelog
1028 cl = self.changelog
1028 # msng is short for missing - compute the list of changesets in this
1029 # msng is short for missing - compute the list of changesets in this
1029 # changegroup.
1030 # changegroup.
1030 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1031 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1031 # Some bases may turn out to be superfluous, and some heads may be
1032 # Some bases may turn out to be superfluous, and some heads may be
1032 # too. nodesbetween will return the minimal set of bases and heads
1033 # too. nodesbetween will return the minimal set of bases and heads
1033 # necessary to re-create the changegroup.
1034 # necessary to re-create the changegroup.
1034
1035
1035 # Known heads are the list of heads that it is assumed the recipient
1036 # Known heads are the list of heads that it is assumed the recipient
1036 # of this changegroup will know about.
1037 # of this changegroup will know about.
1037 knownheads = {}
1038 knownheads = {}
1038 # We assume that all parents of bases are known heads.
1039 # We assume that all parents of bases are known heads.
1039 for n in bases:
1040 for n in bases:
1040 for p in cl.parents(n):
1041 for p in cl.parents(n):
1041 if p != nullid:
1042 if p != nullid:
1042 knownheads[p] = 1
1043 knownheads[p] = 1
1043 knownheads = knownheads.keys()
1044 knownheads = knownheads.keys()
1044 if knownheads:
1045 if knownheads:
1045 # Now that we know what heads are known, we can compute which
1046 # Now that we know what heads are known, we can compute which
1046 # changesets are known. The recipient must know about all
1047 # changesets are known. The recipient must know about all
1047 # changesets required to reach the known heads from the null
1048 # changesets required to reach the known heads from the null
1048 # changeset.
1049 # changeset.
1049 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1050 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1050 junk = None
1051 junk = None
1051 # Transform the list into an ersatz set.
1052 # Transform the list into an ersatz set.
1052 has_cl_set = dict.fromkeys(has_cl_set)
1053 has_cl_set = dict.fromkeys(has_cl_set)
1053 else:
1054 else:
1054 # If there were no known heads, the recipient cannot be assumed to
1055 # If there were no known heads, the recipient cannot be assumed to
1055 # know about any changesets.
1056 # know about any changesets.
1056 has_cl_set = {}
1057 has_cl_set = {}
1057
1058
1058 # Make it easy to refer to self.manifest
1059 # Make it easy to refer to self.manifest
1059 mnfst = self.manifest
1060 mnfst = self.manifest
1060 # We don't know which manifests are missing yet
1061 # We don't know which manifests are missing yet
1061 msng_mnfst_set = {}
1062 msng_mnfst_set = {}
1062 # Nor do we know which filenodes are missing.
1063 # Nor do we know which filenodes are missing.
1063 msng_filenode_set = {}
1064 msng_filenode_set = {}
1064
1065
1065 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1066 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1066 junk = None
1067 junk = None
1067
1068
1068 # A changeset always belongs to itself, so the changenode lookup
1069 # A changeset always belongs to itself, so the changenode lookup
1069 # function for a changenode is identity.
1070 # function for a changenode is identity.
1070 def identity(x):
1071 def identity(x):
1071 return x
1072 return x
1072
1073
1073 # A function generating function. Sets up an environment for the
1074 # A function generating function. Sets up an environment for the
1074 # inner function.
1075 # inner function.
1075 def cmp_by_rev_func(revlog):
1076 def cmp_by_rev_func(revlog):
1076 # Compare two nodes by their revision number in the environment's
1077 # Compare two nodes by their revision number in the environment's
1077 # revision history. Since the revision number both represents the
1078 # revision history. Since the revision number both represents the
1078 # most efficient order to read the nodes in, and represents a
1079 # most efficient order to read the nodes in, and represents a
1079 # topological sorting of the nodes, this function is often useful.
1080 # topological sorting of the nodes, this function is often useful.
1080 def cmp_by_rev(a, b):
1081 def cmp_by_rev(a, b):
1081 return cmp(revlog.rev(a), revlog.rev(b))
1082 return cmp(revlog.rev(a), revlog.rev(b))
1082 return cmp_by_rev
1083 return cmp_by_rev
1083
1084
1084 # If we determine that a particular file or manifest node must be a
1085 # If we determine that a particular file or manifest node must be a
1085 # node that the recipient of the changegroup will already have, we can
1086 # node that the recipient of the changegroup will already have, we can
1086 # also assume the recipient will have all the parents. This function
1087 # also assume the recipient will have all the parents. This function
1087 # prunes them from the set of missing nodes.
1088 # prunes them from the set of missing nodes.
1088 def prune_parents(revlog, hasset, msngset):
1089 def prune_parents(revlog, hasset, msngset):
1089 haslst = hasset.keys()
1090 haslst = hasset.keys()
1090 haslst.sort(cmp_by_rev_func(revlog))
1091 haslst.sort(cmp_by_rev_func(revlog))
1091 for node in haslst:
1092 for node in haslst:
1092 parentlst = [p for p in revlog.parents(node) if p != nullid]
1093 parentlst = [p for p in revlog.parents(node) if p != nullid]
1093 while parentlst:
1094 while parentlst:
1094 n = parentlst.pop()
1095 n = parentlst.pop()
1095 if n not in hasset:
1096 if n not in hasset:
1096 hasset[n] = 1
1097 hasset[n] = 1
1097 p = [p for p in revlog.parents(n) if p != nullid]
1098 p = [p for p in revlog.parents(n) if p != nullid]
1098 parentlst.extend(p)
1099 parentlst.extend(p)
1099 for n in hasset:
1100 for n in hasset:
1100 msngset.pop(n, None)
1101 msngset.pop(n, None)
1101
1102
# Closure factory: binds the shared changed-file set for the
# per-changeset callback below.
def manifest_and_file_collector(changedfileset):
    # The returned function is invoked for every changeset node that
    # goes out as part of the changegroup.  It gathers the manifest
    # nodes that are potentially required (the recipient may already
    # have them) and the total set of files touched by any outgoing
    # changeset.  It also records the first changenode seen to
    # reference each manifest, so we can later decide which changenode
    # 'owns' that manifest.
    def collect_manifests_and_files(clnode):
        chg = cl.read(clnode)
        for fname in chg[3]:
            # Keep exactly one string instance per distinct filename.
            changedfileset.setdefault(fname, fname)
        msng_mnfst_set.setdefault(chg[0], clnode)
    return collect_manifests_and_files
1123
1124
# Of the manifest nodes we think might be in the changegroup, drop the
# ones the recipient must already know about.
def prune_manifests():
    known = {}
    for mnode in msng_mnfst_set:
        # A 'missing' manifest whose linked changenode the recipient
        # is assumed to have must itself already be present on the
        # recipient.
        linknode = cl.node(mnfst.linkrev(mnode))
        if linknode in has_cl_set:
            known[mnode] = 1
    prune_parents(mnfst, known, msng_mnfst_set)
1137
1138
# Resolve a manifest node to its owning changenode, using the mapping
# built up by collect_manifests_and_files.
def lookup_manifest_link(mnfstnode):
    return msng_mnfst_set[mnfstnode]
1142
1143
# Closure factory: sets up the mutable state used by the per-manifest
# callback below.
def filenode_collector(changedfiles):
    # One-element list acts as a writable cell for the inner function.
    next_rev = [0]
    # Invoked for each manifest node included in the changegroup; it
    # works out which filenodes the manifest references so those can
    # be shipped too, and remembers which changenode each filenode
    # belongs to (taken from the first manifest seen referencing it).
    def collect_msng_filenodes(mnfstnode):
        rev = mnfst.rev(mnfstnode)
        if rev == next_rev[0]:
            # Consecutive with the last revision we looked at, so a
            # delta against it is sufficient.
            delta = mdiff.patchtext(mnfst.delta(mnfstnode))
            for dline in delta.splitlines():
                # Each delta line is "<filename>\0<hex filenode>".
                fname, fnode = dline.split('\0')
                fnode = bin(fnode[:40])
                fname = changedfiles.get(fname, None)
                # Only track files the changegroup actually touches.
                if fname is not None:
                    # The changenode this manifest belongs to.
                    clnode = msng_mnfst_set[mnfstnode]
                    # Per-file filenode set, created on first use.
                    ndset = msng_filenode_set.setdefault(fname, {})
                    # Record the owning changenode unless one is
                    # already recorded for this filenode.
                    ndset.setdefault(fnode, clnode)
        else:
            # Not consecutive: fall back to reading the full manifest.
            m = mnfst.read(mnfstnode)
            for fname in changedfiles:
                fnode = m.get(fname, None)
                if fnode is not None:
                    # Same bookkeeping as the delta branch above.
                    clnode = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(fname, {})
                    ndset.setdefault(fnode, clnode)
        # The revision we hope to see next.
        next_rev[0] = rev + 1
    return collect_msng_filenodes
1192
1193
# Given the candidate filenodes collected for file f, discard those
# the recipient must already have.
def prune_filenodes(f, filerevlog):
    msngset = msng_filenode_set[f]
    known = {}
    for fnode in msngset:
        # A filenode whose linked changenode the recipient is assumed
        # to have must already be present on the recipient.
        clnode = cl.node(filerevlog.linkrev(fnode))
        if clnode in has_cl_set:
            known[fnode] = 1
    prune_parents(filerevlog, known, msngset)
1206
1207
# Closure factory: binds the per-file filenode map for the lookup
# function below.
def lookup_filenode_link_func(fname):
    msngset = msng_filenode_set[fname]
    # Resolve a filenode to the changenode it belongs to.
    def lookup_filenode_link(fnode):
        return msngset[fnode]
    return lookup_filenode_link
1215
1216
# With all the helpers above to divide up the work, actually generate
# the changegroup stream.
def gengroup():
    # The changed-file set starts empty; the collector callback fills
    # it in while the changelog group streams out.
    changedfiles = {}
    clgroup = cl.group(msng_cl_lst, identity,
                       manifest_and_file_collector(changedfiles))
    for chunk in clgroup:
        yield chunk

    # The manifest list was populated by the callback during the loop
    # above.  Drop entries the recipient already has, then stream the
    # remainder sorted by revision number, with the filenode collector
    # gathering data for the file groups that follow.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    mngroup = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                          filenode_collector(changedfiles))
    for chunk in mngroup:
        yield chunk

    # These are no longer needed; release the memory.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    # Walk the changed files in sorted name order.
    changedfiles = changedfiles.keys()
    changedfiles.sort()
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes the recipient isn't really missing.
        if msng_filenode_set.has_key(fname):
            prune_filenodes(fname, filerevlog)
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # Only emit a group for this file if any filenodes remain.
        if len(msng_filenode_lst) > 0:
            yield struct.pack(">l", len(fname) + 4) + fname
            # Stream the filenodes in revision order; only a changenode
            # lookup is passed in, since no further data needs
            # collecting from filenodes.
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            fgroup = filerevlog.group(msng_filenode_lst,
                                      lookup_filenode_link_func(fname))
            for chunk in fgroup:
                yield chunk
        if msng_filenode_set.has_key(fname):
            # Done with this file's entry; free the memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield struct.pack(">l", 0)

    self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1278
1279
1279 return util.chunkbuffer(gengroup())
1280 return util.chunkbuffer(gengroup())
1280
1281
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them."""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Revision numbers of every outgoing changeset, for fast lookup.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])

    def identity(x):
        return x

    # Yield the nodes of a revlog whose linked changeset is outgoing.
    def gennodelst(revlog):
        for i in xrange(0, revlog.count()):
            node = revlog.node(i)
            if revlog.linkrev(node) in revset:
                yield node

    # Callback factory: record every file touched by an outgoing
    # changeset into the shared dict.
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            for fname in cl.read(clnode)[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    # Lookup factory: map a revlog node to its linked changenode.
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # Changelog group first; the collector builds the list of all
        # changed files as a side effect.
        changedfiles = {}
        collector = changed_file_collector(changedfiles)
        for chunk in cl.group(nodes, identity, collector):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest group.
        mnfst = self.manifest
        for chunk in mnfst.group(gennodelst(mnfst),
                                 lookuprevlink_func(mnfst)):
            yield chunk

        # Finally one group per changed file, skipping files with no
        # outgoing filenodes.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield struct.pack(">l", len(fname) + 4) + fname
                lookup = lookuprevlink_func(filerevlog)
                for chunk in filerevlog.group(nodeiter, lookup):
                    yield chunk

        # Terminator: no more groups are left.
        yield struct.pack(">l", 0)
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1344
1345
def addchangegroup(self, source):
    """Add a changegroup read from the file-like object *source*.

    Streams the changelog group, the manifest group, and one group per
    changed file into their revlogs inside a single transaction,
    reports progress on the ui, and fires the prechangegroup,
    pretxnchangegroup, changegroup, and incoming hooks at the
    appropriate points.  Raises repo.RepoError on a truncated stream.

    Fix: the results of self.manifest.tip() and the two addgroup()
    calls were previously bound to locals (mm, mo, n) that were never
    read; the unused bindings are removed while keeping the calls."""

    # Read one length-prefixed chunk; "" signals the end of a group.
    def getchunk():
        d = source.read(4)
        if not d:
            return ""
        l = struct.unpack(">l", d)[0]
        # The length field counts its own 4 bytes; <= 4 means an
        # empty chunk, i.e. end of group.
        if l <= 4:
            return ""
        d = source.read(l - 4)
        if len(d) < l - 4:
            raise repo.RepoError(_("premature EOF reading chunk"
                                   " (got %d bytes, expected %d)")
                                 % (len(d), l - 4))
        return d

    # Yield chunks until the end-of-group marker.
    def getgroup():
        while 1:
            c = getchunk()
            if not c:
                break
            yield c

    # Linkrev callback for the changelog: each incoming changeset
    # links to itself (the next revision number).
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return self.changelog.count()

    # Linkrev callback for manifests and files: link to the rev of
    # the owning changeset.
    def revmap(x):
        return self.changelog.rev(x)

    if not source:
        return

    self.hook('prechangegroup', throw=True)

    changesets = files = revisions = 0

    tr = self.transaction()

    oldheads = len(self.changelog.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    co = self.changelog.tip()
    cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
    cnr, cor = map(self.changelog.rev, (cn, co))
    if cn == nullid:
        # Empty group: no new changesets.
        cnr = cor
    changesets = cnr - cor

    # pull off the manifest group (return value intentionally unused)
    self.ui.status(_("adding manifests\n"))
    self.manifest.addgroup(getgroup(), revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while 1:
        f = getchunk()
        if not f:
            break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()
        fl.addgroup(getgroup(), revmap, tr)
        revisions += fl.count() - o
        files += 1

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads > oldheads:
        heads = _(" (+%d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    self.hook('pretxnchangegroup', throw=True,
              node=hex(self.changelog.node(cor+1)))

    tr.close()

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

        for i in range(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)))
1432
1433
1433 def update(self, node, allow=False, force=False, choose=None,
1434 def update(self, node, allow=False, force=False, choose=None,
1434 moddirstate=True, forcemerge=False, wlock=None):
1435 moddirstate=True, forcemerge=False, wlock=None):
1435 pl = self.dirstate.parents()
1436 pl = self.dirstate.parents()
1436 if not force and pl[1] != nullid:
1437 if not force and pl[1] != nullid:
1437 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1438 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1438 return 1
1439 return 1
1439
1440
1440 err = False
1441 err = False
1441
1442
1442 p1, p2 = pl[0], node
1443 p1, p2 = pl[0], node
1443 pa = self.changelog.ancestor(p1, p2)
1444 pa = self.changelog.ancestor(p1, p2)
1444 m1n = self.changelog.read(p1)[0]
1445 m1n = self.changelog.read(p1)[0]
1445 m2n = self.changelog.read(p2)[0]
1446 m2n = self.changelog.read(p2)[0]
1446 man = self.manifest.ancestor(m1n, m2n)
1447 man = self.manifest.ancestor(m1n, m2n)
1447 m1 = self.manifest.read(m1n)
1448 m1 = self.manifest.read(m1n)
1448 mf1 = self.manifest.readflags(m1n)
1449 mf1 = self.manifest.readflags(m1n)
1449 m2 = self.manifest.read(m2n).copy()
1450 m2 = self.manifest.read(m2n).copy()
1450 mf2 = self.manifest.readflags(m2n)
1451 mf2 = self.manifest.readflags(m2n)
1451 ma = self.manifest.read(man)
1452 ma = self.manifest.read(man)
1452 mfa = self.manifest.readflags(man)
1453 mfa = self.manifest.readflags(man)
1453
1454
1454 modified, added, removed, deleted, unknown = self.changes()
1455 modified, added, removed, deleted, unknown = self.changes()
1455
1456
1456 # is this a jump, or a merge? i.e. is there a linear path
1457 # is this a jump, or a merge? i.e. is there a linear path
1457 # from p1 to p2?
1458 # from p1 to p2?
1458 linear_path = (pa == p1 or pa == p2)
1459 linear_path = (pa == p1 or pa == p2)
1459
1460
1460 if allow and linear_path:
1461 if allow and linear_path:
1461 raise util.Abort(_("there is nothing to merge, "
1462 raise util.Abort(_("there is nothing to merge, "
1462 "just use 'hg update'"))
1463 "just use 'hg update'"))
1463 if allow and not forcemerge:
1464 if allow and not forcemerge:
1464 if modified or added or removed:
1465 if modified or added or removed:
1465 raise util.Abort(_("outstanding uncommited changes"))
1466 raise util.Abort(_("outstanding uncommited changes"))
1466 if not forcemerge and not force:
1467 if not forcemerge and not force:
1467 for f in unknown:
1468 for f in unknown:
1468 if f in m2:
1469 if f in m2:
1469 t1 = self.wread(f)
1470 t1 = self.wread(f)
1470 t2 = self.file(f).read(m2[f])
1471 t2 = self.file(f).read(m2[f])
1471 if cmp(t1, t2) != 0:
1472 if cmp(t1, t2) != 0:
1472 raise util.Abort(_("'%s' already exists in the working"
1473 raise util.Abort(_("'%s' already exists in the working"
1473 " dir and differs from remote") % f)
1474 " dir and differs from remote") % f)
1474
1475
1475 # resolve the manifest to determine which files
1476 # resolve the manifest to determine which files
1476 # we care about merging
1477 # we care about merging
1477 self.ui.note(_("resolving manifests\n"))
1478 self.ui.note(_("resolving manifests\n"))
1478 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1479 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1479 (force, allow, moddirstate, linear_path))
1480 (force, allow, moddirstate, linear_path))
1480 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1481 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1481 (short(man), short(m1n), short(m2n)))
1482 (short(man), short(m1n), short(m2n)))
1482
1483
1483 merge = {}
1484 merge = {}
1484 get = {}
1485 get = {}
1485 remove = []
1486 remove = []
1486
1487
1487 # construct a working dir manifest
1488 # construct a working dir manifest
1488 mw = m1.copy()
1489 mw = m1.copy()
1489 mfw = mf1.copy()
1490 mfw = mf1.copy()
1490 umap = dict.fromkeys(unknown)
1491 umap = dict.fromkeys(unknown)
1491
1492
1492 for f in added + modified + unknown:
1493 for f in added + modified + unknown:
1493 mw[f] = ""
1494 mw[f] = ""
1494 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1495 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1495
1496
1496 if moddirstate and not wlock:
1497 if moddirstate and not wlock:
1497 wlock = self.wlock()
1498 wlock = self.wlock()
1498
1499
1499 for f in deleted + removed:
1500 for f in deleted + removed:
1500 if f in mw:
1501 if f in mw:
1501 del mw[f]
1502 del mw[f]
1502
1503
1503 # If we're jumping between revisions (as opposed to merging),
1504 # If we're jumping between revisions (as opposed to merging),
1504 # and if neither the working directory nor the target rev has
1505 # and if neither the working directory nor the target rev has
1505 # the file, then we need to remove it from the dirstate, to
1506 # the file, then we need to remove it from the dirstate, to
1506 # prevent the dirstate from listing the file when it is no
1507 # prevent the dirstate from listing the file when it is no
1507 # longer in the manifest.
1508 # longer in the manifest.
1508 if moddirstate and linear_path and f not in m2:
1509 if moddirstate and linear_path and f not in m2:
1509 self.dirstate.forget((f,))
1510 self.dirstate.forget((f,))
1510
1511
1511 # Compare manifests
1512 # Compare manifests
1512 for f, n in mw.iteritems():
1513 for f, n in mw.iteritems():
1513 if choose and not choose(f):
1514 if choose and not choose(f):
1514 continue
1515 continue
1515 if f in m2:
1516 if f in m2:
1516 s = 0
1517 s = 0
1517
1518
1518 # is the wfile new since m1, and match m2?
1519 # is the wfile new since m1, and match m2?
1519 if f not in m1:
1520 if f not in m1:
1520 t1 = self.wread(f)
1521 t1 = self.wread(f)
1521 t2 = self.file(f).read(m2[f])
1522 t2 = self.file(f).read(m2[f])
1522 if cmp(t1, t2) == 0:
1523 if cmp(t1, t2) == 0:
1523 n = m2[f]
1524 n = m2[f]
1524 del t1, t2
1525 del t1, t2
1525
1526
1526 # are files different?
1527 # are files different?
1527 if n != m2[f]:
1528 if n != m2[f]:
1528 a = ma.get(f, nullid)
1529 a = ma.get(f, nullid)
1529 # are both different from the ancestor?
1530 # are both different from the ancestor?
1530 if n != a and m2[f] != a:
1531 if n != a and m2[f] != a:
1531 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1532 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1532 # merge executable bits
1533 # merge executable bits
1533 # "if we changed or they changed, change in merge"
1534 # "if we changed or they changed, change in merge"
1534 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1535 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1535 mode = ((a^b) | (a^c)) ^ a
1536 mode = ((a^b) | (a^c)) ^ a
1536 merge[f] = (m1.get(f, nullid), m2[f], mode)
1537 merge[f] = (m1.get(f, nullid), m2[f], mode)
1537 s = 1
1538 s = 1
1538 # are we clobbering?
1539 # are we clobbering?
1539 # is remote's version newer?
1540 # is remote's version newer?
1540 # or are we going back in time?
1541 # or are we going back in time?
1541 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1542 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1542 self.ui.debug(_(" remote %s is newer, get\n") % f)
1543 self.ui.debug(_(" remote %s is newer, get\n") % f)
1543 get[f] = m2[f]
1544 get[f] = m2[f]
1544 s = 1
1545 s = 1
1545 elif f in umap:
1546 elif f in umap:
1546 # this unknown file is the same as the checkout
1547 # this unknown file is the same as the checkout
1547 get[f] = m2[f]
1548 get[f] = m2[f]
1548
1549
1549 if not s and mfw[f] != mf2[f]:
1550 if not s and mfw[f] != mf2[f]:
1550 if force:
1551 if force:
1551 self.ui.debug(_(" updating permissions for %s\n") % f)
1552 self.ui.debug(_(" updating permissions for %s\n") % f)
1552 util.set_exec(self.wjoin(f), mf2[f])
1553 util.set_exec(self.wjoin(f), mf2[f])
1553 else:
1554 else:
1554 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1555 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1555 mode = ((a^b) | (a^c)) ^ a
1556 mode = ((a^b) | (a^c)) ^ a
1556 if mode != b:
1557 if mode != b:
1557 self.ui.debug(_(" updating permissions for %s\n")
1558 self.ui.debug(_(" updating permissions for %s\n")
1558 % f)
1559 % f)
1559 util.set_exec(self.wjoin(f), mode)
1560 util.set_exec(self.wjoin(f), mode)
1560 del m2[f]
1561 del m2[f]
1561 elif f in ma:
1562 elif f in ma:
1562 if n != ma[f]:
1563 if n != ma[f]:
1563 r = _("d")
1564 r = _("d")
1564 if not force and (linear_path or allow):
1565 if not force and (linear_path or allow):
1565 r = self.ui.prompt(
1566 r = self.ui.prompt(
1566 (_(" local changed %s which remote deleted\n") % f) +
1567 (_(" local changed %s which remote deleted\n") % f) +
1567 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1568 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1568 if r == _("d"):
1569 if r == _("d"):
1569 remove.append(f)
1570 remove.append(f)
1570 else:
1571 else:
1571 self.ui.debug(_("other deleted %s\n") % f)
1572 self.ui.debug(_("other deleted %s\n") % f)
1572 remove.append(f) # other deleted it
1573 remove.append(f) # other deleted it
1573 else:
1574 else:
1574 # file is created on branch or in working directory
1575 # file is created on branch or in working directory
1575 if force and f not in umap:
1576 if force and f not in umap:
1576 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1577 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1577 remove.append(f)
1578 remove.append(f)
1578 elif n == m1.get(f, nullid): # same as parent
1579 elif n == m1.get(f, nullid): # same as parent
1579 if p2 == pa: # going backwards?
1580 if p2 == pa: # going backwards?
1580 self.ui.debug(_("remote deleted %s\n") % f)
1581 self.ui.debug(_("remote deleted %s\n") % f)
1581 remove.append(f)
1582 remove.append(f)
1582 else:
1583 else:
1583 self.ui.debug(_("local modified %s, keeping\n") % f)
1584 self.ui.debug(_("local modified %s, keeping\n") % f)
1584 else:
1585 else:
1585 self.ui.debug(_("working dir created %s, keeping\n") % f)
1586 self.ui.debug(_("working dir created %s, keeping\n") % f)
1586
1587
1587 for f, n in m2.iteritems():
1588 for f, n in m2.iteritems():
1588 if choose and not choose(f):
1589 if choose and not choose(f):
1589 continue
1590 continue
1590 if f[0] == "/":
1591 if f[0] == "/":
1591 continue
1592 continue
1592 if f in ma and n != ma[f]:
1593 if f in ma and n != ma[f]:
1593 r = _("k")
1594 r = _("k")
1594 if not force and (linear_path or allow):
1595 if not force and (linear_path or allow):
1595 r = self.ui.prompt(
1596 r = self.ui.prompt(
1596 (_("remote changed %s which local deleted\n") % f) +
1597 (_("remote changed %s which local deleted\n") % f) +
1597 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1598 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1598 if r == _("k"):
1599 if r == _("k"):
1599 get[f] = n
1600 get[f] = n
1600 elif f not in ma:
1601 elif f not in ma:
1601 self.ui.debug(_("remote created %s\n") % f)
1602 self.ui.debug(_("remote created %s\n") % f)
1602 get[f] = n
1603 get[f] = n
1603 else:
1604 else:
1604 if force or p2 == pa: # going backwards?
1605 if force or p2 == pa: # going backwards?
1605 self.ui.debug(_("local deleted %s, recreating\n") % f)
1606 self.ui.debug(_("local deleted %s, recreating\n") % f)
1606 get[f] = n
1607 get[f] = n
1607 else:
1608 else:
1608 self.ui.debug(_("local deleted %s\n") % f)
1609 self.ui.debug(_("local deleted %s\n") % f)
1609
1610
1610 del mw, m1, m2, ma
1611 del mw, m1, m2, ma
1611
1612
1612 if force:
1613 if force:
1613 for f in merge:
1614 for f in merge:
1614 get[f] = merge[f][1]
1615 get[f] = merge[f][1]
1615 merge = {}
1616 merge = {}
1616
1617
1617 if linear_path or force:
1618 if linear_path or force:
1618 # we don't need to do any magic, just jump to the new rev
1619 # we don't need to do any magic, just jump to the new rev
1619 branch_merge = False
1620 branch_merge = False
1620 p1, p2 = p2, nullid
1621 p1, p2 = p2, nullid
1621 else:
1622 else:
1622 if not allow:
1623 if not allow:
1623 self.ui.status(_("this update spans a branch"
1624 self.ui.status(_("this update spans a branch"
1624 " affecting the following files:\n"))
1625 " affecting the following files:\n"))
1625 fl = merge.keys() + get.keys()
1626 fl = merge.keys() + get.keys()
1626 fl.sort()
1627 fl.sort()
1627 for f in fl:
1628 for f in fl:
1628 cf = ""
1629 cf = ""
1629 if f in merge:
1630 if f in merge:
1630 cf = _(" (resolve)")
1631 cf = _(" (resolve)")
1631 self.ui.status(" %s%s\n" % (f, cf))
1632 self.ui.status(" %s%s\n" % (f, cf))
1632 self.ui.warn(_("aborting update spanning branches!\n"))
1633 self.ui.warn(_("aborting update spanning branches!\n"))
1633 self.ui.status(_("(use update -m to merge across branches"
1634 self.ui.status(_("(use update -m to merge across branches"
1634 " or -C to lose changes)\n"))
1635 " or -C to lose changes)\n"))
1635 return 1
1636 return 1
1636 branch_merge = True
1637 branch_merge = True
1637
1638
1638 # get the files we don't need to change
1639 # get the files we don't need to change
1639 files = get.keys()
1640 files = get.keys()
1640 files.sort()
1641 files.sort()
1641 for f in files:
1642 for f in files:
1642 if f[0] == "/":
1643 if f[0] == "/":
1643 continue
1644 continue
1644 self.ui.note(_("getting %s\n") % f)
1645 self.ui.note(_("getting %s\n") % f)
1645 t = self.file(f).read(get[f])
1646 t = self.file(f).read(get[f])
1646 self.wwrite(f, t)
1647 self.wwrite(f, t)
1647 util.set_exec(self.wjoin(f), mf2[f])
1648 util.set_exec(self.wjoin(f), mf2[f])
1648 if moddirstate:
1649 if moddirstate:
1649 if branch_merge:
1650 if branch_merge:
1650 self.dirstate.update([f], 'n', st_mtime=-1)
1651 self.dirstate.update([f], 'n', st_mtime=-1)
1651 else:
1652 else:
1652 self.dirstate.update([f], 'n')
1653 self.dirstate.update([f], 'n')
1653
1654
1654 # merge the tricky bits
1655 # merge the tricky bits
1655 files = merge.keys()
1656 files = merge.keys()
1656 files.sort()
1657 files.sort()
1657 for f in files:
1658 for f in files:
1658 self.ui.status(_("merging %s\n") % f)
1659 self.ui.status(_("merging %s\n") % f)
1659 my, other, flag = merge[f]
1660 my, other, flag = merge[f]
1660 ret = self.merge3(f, my, other)
1661 ret = self.merge3(f, my, other)
1661 if ret:
1662 if ret:
1662 err = True
1663 err = True
1663 util.set_exec(self.wjoin(f), flag)
1664 util.set_exec(self.wjoin(f), flag)
1664 if moddirstate:
1665 if moddirstate:
1665 if branch_merge:
1666 if branch_merge:
1666 # We've done a branch merge, mark this file as merged
1667 # We've done a branch merge, mark this file as merged
1667 # so that we properly record the merger later
1668 # so that we properly record the merger later
1668 self.dirstate.update([f], 'm')
1669 self.dirstate.update([f], 'm')
1669 else:
1670 else:
1670 # We've update-merged a locally modified file, so
1671 # We've update-merged a locally modified file, so
1671 # we set the dirstate to emulate a normal checkout
1672 # we set the dirstate to emulate a normal checkout
1672 # of that file some time in the past. Thus our
1673 # of that file some time in the past. Thus our
1673 # merge will appear as a normal local file
1674 # merge will appear as a normal local file
1674 # modification.
1675 # modification.
1675 f_len = len(self.file(f).read(other))
1676 f_len = len(self.file(f).read(other))
1676 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1677 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1677
1678
1678 remove.sort()
1679 remove.sort()
1679 for f in remove:
1680 for f in remove:
1680 self.ui.note(_("removing %s\n") % f)
1681 self.ui.note(_("removing %s\n") % f)
1681 try:
1682 try:
1682 util.unlink(self.wjoin(f))
1683 util.unlink(self.wjoin(f))
1683 except OSError, inst:
1684 except OSError, inst:
1684 if inst.errno != errno.ENOENT:
1685 if inst.errno != errno.ENOENT:
1685 self.ui.warn(_("update failed to remove %s: %s!\n") %
1686 self.ui.warn(_("update failed to remove %s: %s!\n") %
1686 (f, inst.strerror))
1687 (f, inst.strerror))
1687 if moddirstate:
1688 if moddirstate:
1688 if branch_merge:
1689 if branch_merge:
1689 self.dirstate.update(remove, 'r')
1690 self.dirstate.update(remove, 'r')
1690 else:
1691 else:
1691 self.dirstate.forget(remove)
1692 self.dirstate.forget(remove)
1692
1693
1693 if moddirstate:
1694 if moddirstate:
1694 self.dirstate.setparents(p1, p2)
1695 self.dirstate.setparents(p1, p2)
1695 return err
1696 return err
1696
1697
1697 def merge3(self, fn, my, other):
1698 def merge3(self, fn, my, other):
1698 """perform a 3-way merge in the working directory"""
1699 """perform a 3-way merge in the working directory"""
1699
1700
1700 def temp(prefix, node):
1701 def temp(prefix, node):
1701 pre = "%s~%s." % (os.path.basename(fn), prefix)
1702 pre = "%s~%s." % (os.path.basename(fn), prefix)
1702 (fd, name) = tempfile.mkstemp("", pre)
1703 (fd, name) = tempfile.mkstemp("", pre)
1703 f = os.fdopen(fd, "wb")
1704 f = os.fdopen(fd, "wb")
1704 self.wwrite(fn, fl.read(node), f)
1705 self.wwrite(fn, fl.read(node), f)
1705 f.close()
1706 f.close()
1706 return name
1707 return name
1707
1708
1708 fl = self.file(fn)
1709 fl = self.file(fn)
1709 base = fl.ancestor(my, other)
1710 base = fl.ancestor(my, other)
1710 a = self.wjoin(fn)
1711 a = self.wjoin(fn)
1711 b = temp("base", base)
1712 b = temp("base", base)
1712 c = temp("other", other)
1713 c = temp("other", other)
1713
1714
1714 self.ui.note(_("resolving %s\n") % fn)
1715 self.ui.note(_("resolving %s\n") % fn)
1715 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1716 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1716 (fn, short(my), short(other), short(base)))
1717 (fn, short(my), short(other), short(base)))
1717
1718
1718 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1719 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1719 or "hgmerge")
1720 or "hgmerge")
1720 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1721 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1721 if r:
1722 if r:
1722 self.ui.warn(_("merging %s failed!\n") % fn)
1723 self.ui.warn(_("merging %s failed!\n") % fn)
1723
1724
1724 os.unlink(b)
1725 os.unlink(b)
1725 os.unlink(c)
1726 os.unlink(c)
1726 return r
1727 return r
1727
1728
1728 def verify(self):
1729 def verify(self):
1729 filelinkrevs = {}
1730 filelinkrevs = {}
1730 filenodes = {}
1731 filenodes = {}
1731 changesets = revisions = files = 0
1732 changesets = revisions = files = 0
1732 errors = [0]
1733 errors = [0]
1733 neededmanifests = {}
1734 neededmanifests = {}
1734
1735
1735 def err(msg):
1736 def err(msg):
1736 self.ui.warn(msg + "\n")
1737 self.ui.warn(msg + "\n")
1737 errors[0] += 1
1738 errors[0] += 1
1738
1739
1739 def checksize(obj, name):
1740 def checksize(obj, name):
1740 d = obj.checksize()
1741 d = obj.checksize()
1741 if d[0]:
1742 if d[0]:
1742 err(_("%s data length off by %d bytes") % (name, d[0]))
1743 err(_("%s data length off by %d bytes") % (name, d[0]))
1743 if d[1]:
1744 if d[1]:
1744 err(_("%s index contains %d extra bytes") % (name, d[1]))
1745 err(_("%s index contains %d extra bytes") % (name, d[1]))
1745
1746
1746 seen = {}
1747 seen = {}
1747 self.ui.status(_("checking changesets\n"))
1748 self.ui.status(_("checking changesets\n"))
1748 checksize(self.changelog, "changelog")
1749 checksize(self.changelog, "changelog")
1749
1750
1750 for i in range(self.changelog.count()):
1751 for i in range(self.changelog.count()):
1751 changesets += 1
1752 changesets += 1
1752 n = self.changelog.node(i)
1753 n = self.changelog.node(i)
1753 l = self.changelog.linkrev(n)
1754 l = self.changelog.linkrev(n)
1754 if l != i:
1755 if l != i:
1755 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1756 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1756 if n in seen:
1757 if n in seen:
1757 err(_("duplicate changeset at revision %d") % i)
1758 err(_("duplicate changeset at revision %d") % i)
1758 seen[n] = 1
1759 seen[n] = 1
1759
1760
1760 for p in self.changelog.parents(n):
1761 for p in self.changelog.parents(n):
1761 if p not in self.changelog.nodemap:
1762 if p not in self.changelog.nodemap:
1762 err(_("changeset %s has unknown parent %s") %
1763 err(_("changeset %s has unknown parent %s") %
1763 (short(n), short(p)))
1764 (short(n), short(p)))
1764 try:
1765 try:
1765 changes = self.changelog.read(n)
1766 changes = self.changelog.read(n)
1766 except KeyboardInterrupt:
1767 except KeyboardInterrupt:
1767 self.ui.warn(_("interrupted"))
1768 self.ui.warn(_("interrupted"))
1768 raise
1769 raise
1769 except Exception, inst:
1770 except Exception, inst:
1770 err(_("unpacking changeset %s: %s") % (short(n), inst))
1771 err(_("unpacking changeset %s: %s") % (short(n), inst))
1771
1772
1772 neededmanifests[changes[0]] = n
1773 neededmanifests[changes[0]] = n
1773
1774
1774 for f in changes[3]:
1775 for f in changes[3]:
1775 filelinkrevs.setdefault(f, []).append(i)
1776 filelinkrevs.setdefault(f, []).append(i)
1776
1777
1777 seen = {}
1778 seen = {}
1778 self.ui.status(_("checking manifests\n"))
1779 self.ui.status(_("checking manifests\n"))
1779 checksize(self.manifest, "manifest")
1780 checksize(self.manifest, "manifest")
1780
1781
1781 for i in range(self.manifest.count()):
1782 for i in range(self.manifest.count()):
1782 n = self.manifest.node(i)
1783 n = self.manifest.node(i)
1783 l = self.manifest.linkrev(n)
1784 l = self.manifest.linkrev(n)
1784
1785
1785 if l < 0 or l >= self.changelog.count():
1786 if l < 0 or l >= self.changelog.count():
1786 err(_("bad manifest link (%d) at revision %d") % (l, i))
1787 err(_("bad manifest link (%d) at revision %d") % (l, i))
1787
1788
1788 if n in neededmanifests:
1789 if n in neededmanifests:
1789 del neededmanifests[n]
1790 del neededmanifests[n]
1790
1791
1791 if n in seen:
1792 if n in seen:
1792 err(_("duplicate manifest at revision %d") % i)
1793 err(_("duplicate manifest at revision %d") % i)
1793
1794
1794 seen[n] = 1
1795 seen[n] = 1
1795
1796
1796 for p in self.manifest.parents(n):
1797 for p in self.manifest.parents(n):
1797 if p not in self.manifest.nodemap:
1798 if p not in self.manifest.nodemap:
1798 err(_("manifest %s has unknown parent %s") %
1799 err(_("manifest %s has unknown parent %s") %
1799 (short(n), short(p)))
1800 (short(n), short(p)))
1800
1801
1801 try:
1802 try:
1802 delta = mdiff.patchtext(self.manifest.delta(n))
1803 delta = mdiff.patchtext(self.manifest.delta(n))
1803 except KeyboardInterrupt:
1804 except KeyboardInterrupt:
1804 self.ui.warn(_("interrupted"))
1805 self.ui.warn(_("interrupted"))
1805 raise
1806 raise
1806 except Exception, inst:
1807 except Exception, inst:
1807 err(_("unpacking manifest %s: %s") % (short(n), inst))
1808 err(_("unpacking manifest %s: %s") % (short(n), inst))
1808
1809
1809 ff = [ l.split('\0') for l in delta.splitlines() ]
1810 ff = [ l.split('\0') for l in delta.splitlines() ]
1810 for f, fn in ff:
1811 for f, fn in ff:
1811 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1812 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1812
1813
1813 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1814 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1814
1815
1815 for m, c in neededmanifests.items():
1816 for m, c in neededmanifests.items():
1816 err(_("Changeset %s refers to unknown manifest %s") %
1817 err(_("Changeset %s refers to unknown manifest %s") %
1817 (short(m), short(c)))
1818 (short(m), short(c)))
1818 del neededmanifests
1819 del neededmanifests
1819
1820
1820 for f in filenodes:
1821 for f in filenodes:
1821 if f not in filelinkrevs:
1822 if f not in filelinkrevs:
1822 err(_("file %s in manifest but not in changesets") % f)
1823 err(_("file %s in manifest but not in changesets") % f)
1823
1824
1824 for f in filelinkrevs:
1825 for f in filelinkrevs:
1825 if f not in filenodes:
1826 if f not in filenodes:
1826 err(_("file %s in changeset but not in manifest") % f)
1827 err(_("file %s in changeset but not in manifest") % f)
1827
1828
1828 self.ui.status(_("checking files\n"))
1829 self.ui.status(_("checking files\n"))
1829 ff = filenodes.keys()
1830 ff = filenodes.keys()
1830 ff.sort()
1831 ff.sort()
1831 for f in ff:
1832 for f in ff:
1832 if f == "/dev/null":
1833 if f == "/dev/null":
1833 continue
1834 continue
1834 files += 1
1835 files += 1
1835 fl = self.file(f)
1836 fl = self.file(f)
1836 checksize(fl, f)
1837 checksize(fl, f)
1837
1838
1838 nodes = {nullid: 1}
1839 nodes = {nullid: 1}
1839 seen = {}
1840 seen = {}
1840 for i in range(fl.count()):
1841 for i in range(fl.count()):
1841 revisions += 1
1842 revisions += 1
1842 n = fl.node(i)
1843 n = fl.node(i)
1843
1844
1844 if n in seen:
1845 if n in seen:
1845 err(_("%s: duplicate revision %d") % (f, i))
1846 err(_("%s: duplicate revision %d") % (f, i))
1846 if n not in filenodes[f]:
1847 if n not in filenodes[f]:
1847 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1848 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1848 else:
1849 else:
1849 del filenodes[f][n]
1850 del filenodes[f][n]
1850
1851
1851 flr = fl.linkrev(n)
1852 flr = fl.linkrev(n)
1852 if flr not in filelinkrevs[f]:
1853 if flr not in filelinkrevs[f]:
1853 err(_("%s:%s points to unexpected changeset %d")
1854 err(_("%s:%s points to unexpected changeset %d")
1854 % (f, short(n), flr))
1855 % (f, short(n), flr))
1855 else:
1856 else:
1856 filelinkrevs[f].remove(flr)
1857 filelinkrevs[f].remove(flr)
1857
1858
1858 # verify contents
1859 # verify contents
1859 try:
1860 try:
1860 t = fl.read(n)
1861 t = fl.read(n)
1861 except KeyboardInterrupt:
1862 except KeyboardInterrupt:
1862 self.ui.warn(_("interrupted"))
1863 self.ui.warn(_("interrupted"))
1863 raise
1864 raise
1864 except Exception, inst:
1865 except Exception, inst:
1865 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1866 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1866
1867
1867 # verify parents
1868 # verify parents
1868 (p1, p2) = fl.parents(n)
1869 (p1, p2) = fl.parents(n)
1869 if p1 not in nodes:
1870 if p1 not in nodes:
1870 err(_("file %s:%s unknown parent 1 %s") %
1871 err(_("file %s:%s unknown parent 1 %s") %
1871 (f, short(n), short(p1)))
1872 (f, short(n), short(p1)))
1872 if p2 not in nodes:
1873 if p2 not in nodes:
1873 err(_("file %s:%s unknown parent 2 %s") %
1874 err(_("file %s:%s unknown parent 2 %s") %
1874 (f, short(n), short(p1)))
1875 (f, short(n), short(p1)))
1875 nodes[n] = 1
1876 nodes[n] = 1
1876
1877
1877 # cross-check
1878 # cross-check
1878 for node in filenodes[f]:
1879 for node in filenodes[f]:
1879 err(_("node %s in manifests not in %s") % (hex(node), f))
1880 err(_("node %s in manifests not in %s") % (hex(node), f))
1880
1881
1881 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1882 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1882 (files, changesets, revisions))
1883 (files, changesets, revisions))
1883
1884
1884 if errors[0]:
1885 if errors[0]:
1885 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1886 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1886 return 1
1887 return 1
1887
1888
1888 # used to avoid circular references so destructors work
1889 # used to avoid circular references so destructors work
1889 def aftertrans(base):
1890 def aftertrans(base):
1890 p = base
1891 p = base
1891 def a():
1892 def a():
1892 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1893 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1893 util.rename(os.path.join(p, "journal.dirstate"),
1894 util.rename(os.path.join(p, "journal.dirstate"),
1894 os.path.join(p, "undo.dirstate"))
1895 os.path.join(p, "undo.dirstate"))
1895 return a
1896 return a
1896
1897
General Comments 0
You need to be logged in to leave comments. Login now