##// END OF EJS Templates
hooks run after transactions finish must not affect method results.
Vadim Gelfer -
r1717:7a4a16a7 default
parent child Browse files
Show More
@@ -1,1846 +1,1840 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14
14
15 class localrepository(object):
15 class localrepository(object):
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create, initialize) the repository at path.

        If path is None, search upward from the current directory for a
        ".hg" directory.  Raises repo.RepoError when no repository is
        found, or when create is false and path has no ".hg".
        """
        if not path:
            p = os.getcwd()
            # walk up the directory tree looking for a ".hg" directory
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui
        # opener reads/writes under .hg; wopener under the working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-filled caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            # repository-local configuration overrides, if present
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            pass
50
50
    def hook(self, name, **args):
        """Run every [hooks] config entry whose name matches `name`.

        Keyword arguments are exported into the hook's environment with
        upper-cased names.  Returns True only if every matching hook
        exited with status 0.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            # export args into the environment, remembering prior values
            # so they can be restored afterwards
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(cmd)
            os.chdir(olddir)

            # restore the previous environment
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
            return True

        r = True
        # hooks may be configured as "name" or "name.suffix"; run all that
        # match, and accumulate failure without stopping early
        for hname, cmd in self.ui.configitems("hooks"):
            s = hname.split(".")
            if s[0] == name and cmd:
                r = runhook(hname, cmd) and r
        return r
84
84
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # note: takes `self` explicitly because this is a plain
            # nested function, not a method
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed node hash: record an empty node
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # repository has no .hgtags file
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' always wins over any user-defined tag
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
122
122
123 def tagslist(self):
123 def tagslist(self):
124 '''return a list of tags ordered by revision'''
124 '''return a list of tags ordered by revision'''
125 l = []
125 l = []
126 for t, n in self.tags().items():
126 for t, n in self.tags().items():
127 try:
127 try:
128 r = self.changelog.rev(n)
128 r = self.changelog.rev(n)
129 except:
129 except:
130 r = -2 # sort to the beginning of the list if unknown
130 r = -2 # sort to the beginning of the list if unknown
131 l.append((r, t, n))
131 l.append((r, t, n))
132 l.sort()
132 l.sort()
133 return [(t, n) for r, t, n in l]
133 return [(t, n) for r, t, n in l]
134
134
135 def nodetags(self, node):
135 def nodetags(self, node):
136 '''return the tags associated with a node'''
136 '''return the tags associated with a node'''
137 if not self.nodetagscache:
137 if not self.nodetagscache:
138 self.nodetagscache = {}
138 self.nodetagscache = {}
139 for t, n in self.tags().items():
139 for t, n in self.tags().items():
140 self.nodetagscache.setdefault(n, []).append(t)
140 self.nodetagscache.setdefault(n, []).append(t)
141 return self.nodetagscache.get(node, [])
141 return self.nodetagscache.get(node, [])
142
142
    def lookup(self, key):
        """Resolve a tag name or changelog identifier to a node.

        Tags take precedence; anything else is handed to the changelog.
        Any failure is translated into repo.RepoError.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # present a uniform error regardless of what went wrong
                raise repo.RepoError(_("unknown revision '%s'") % key)
151
151
152 def dev(self):
152 def dev(self):
153 return os.stat(self.path).st_dev
153 return os.stat(self.path).st_dev
154
154
    def local(self):
        # distinguishes repository classes: local (filesystem) repos
        # answer True here
        return True
157
157
    def join(self, f):
        """Return the path of f under the .hg directory."""
        return os.path.join(self.path, f)
160
160
    def wjoin(self, f):
        """Return the path of f under the working-directory root."""
        return os.path.join(self.root, f)
163
163
164 def file(self, f):
164 def file(self, f):
165 if f[0] == '/':
165 if f[0] == '/':
166 f = f[1:]
166 f = f[1:]
167 return filelog.filelog(self.opener, f)
167 return filelog.filelog(self.opener, f)
168
168
    def getcwd(self):
        # delegate to the dirstate's notion of the current directory
        # (presumably relative to the repo root -- confirm in dirstate.py)
        return self.dirstate.getcwd()
171
171
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
174
174
175 def wread(self, filename):
175 def wread(self, filename):
176 if self.encodepats == None:
176 if self.encodepats == None:
177 l = []
177 l = []
178 for pat, cmd in self.ui.configitems("encode"):
178 for pat, cmd in self.ui.configitems("encode"):
179 mf = util.matcher("", "/", [pat], [], [])[1]
179 mf = util.matcher("", "/", [pat], [], [])[1]
180 l.append((mf, cmd))
180 l.append((mf, cmd))
181 self.encodepats = l
181 self.encodepats = l
182
182
183 data = self.wopener(filename, 'r').read()
183 data = self.wopener(filename, 'r').read()
184
184
185 for mf, cmd in self.encodepats:
185 for mf, cmd in self.encodepats:
186 if mf(filename):
186 if mf(filename):
187 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
187 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
188 data = util.filter(data, cmd)
188 data = util.filter(data, cmd)
189 break
189 break
190
190
191 return data
191 return data
192
192
193 def wwrite(self, filename, data, fd=None):
193 def wwrite(self, filename, data, fd=None):
194 if self.decodepats == None:
194 if self.decodepats == None:
195 l = []
195 l = []
196 for pat, cmd in self.ui.configitems("decode"):
196 for pat, cmd in self.ui.configitems("decode"):
197 mf = util.matcher("", "/", [pat], [], [])[1]
197 mf = util.matcher("", "/", [pat], [], [])[1]
198 l.append((mf, cmd))
198 l.append((mf, cmd))
199 self.decodepats = l
199 self.decodepats = l
200
200
201 for mf, cmd in self.decodepats:
201 for mf, cmd in self.decodepats:
202 if mf(filename):
202 if mf(filename):
203 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
203 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
204 data = util.filter(data, cmd)
204 data = util.filter(data, cmd)
205 break
205 break
206
206
207 if fd:
207 if fd:
208 return fd.write(data)
208 return fd.write(data)
209 return self.wopener(filename, 'w').write(data)
209 return self.wopener(filename, 'w').write(data)
210
210
    def transaction(self):
        """Start a new journaled transaction.

        The current dirstate is snapshotted first so a later undo() can
        restore it; when the transaction closes, after() renames the
        journal files to undo files.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, keep the journal around as undo data
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
226
226
    def recover(self):
        """Roll back an interrupted transaction.

        Returns True if a journal existed and was rolled back, else False.
        """
        # the lock is held for the duration of this method via the local
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # reload revlogs invalidated by the rollback
            self.manifest = manifest.manifest(self.opener)
            self.changelog = changelog.changelog(self.opener)
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
238
238
    def undo(self, wlock=None):
        """Roll back the last transaction and restore the saved dirstate.

        An existing wlock may be passed in to avoid re-acquiring it.
        """
        if not wlock:
            wlock = self.wlock()
        # repository lock held for the duration of this method
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # pick up the restored dirstate from disk
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
250
250
    def lock(self, wait=1):
        """Acquire the repository (store) lock.

        With wait true, block until the current holder releases it,
        warning once; otherwise re-raise LockHeld immediately.
        """
        try:
            # non-blocking attempt first so we can warn before waiting
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
259
259
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        dirstate.write is registered as the lock's callback, and the
        dirstate is re-read once the lock is held so we see changes
        other processes may have written.
        """
        try:
            # non-blocking attempt first so we can warn before waiting
            wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
        self.dirstate.read()
        return wlock
270
270
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit `files` directly against explicit parents, without
        consulting the dirstate for what changed.

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when p1 is the current first parent (i.e. we are
        committing on top of the working directory).
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing on top of it
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as a removal
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
343
343
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, wlock=None):
        """Commit working-directory changes as a new changeset.

        files: explicit list to commit; otherwise everything matched by
        `match` that changed.  Returns the new changeset node, or None
        when nothing changed, the precommit hook vetoed, or the commit
        message editor returned an empty message.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicit file list by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 != nullid) is always committed, even with no changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        # the precommit hook may veto the commit
        if not self.hook("precommit"):
            return None

        if not wlock:
            wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1) and fp2 == nullid:
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build a template commit message and launch the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        # the commit hook runs after the transaction has finished, so its
        # outcome must not affect this method's result: the commit has
        # already happened, ignore the hook's return value
        self.hook("commit", node=hex(n))
        return n
471
470
    def walk(self, node=None, files=[], match=util.always):
        """Yield (source, filename) pairs for files of interest.

        With node, walk that revision's manifest (source 'm'), warning
        afterwards about requested files absent from it; otherwise walk
        the dirstate/working directory.
        """
        if node:
            # fdict starts as the set of requested files; entries found
            # in the manifest are removed, leaving the missing ones
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
485
484
486 def changes(self, node1=None, node2=None, files=[], match=util.always,
485 def changes(self, node1=None, node2=None, files=[], match=util.always,
487 wlock=None):
486 wlock=None):
488 """return changes between two nodes or node and working directory
487 """return changes between two nodes or node and working directory
489
488
490 If node1 is None, use the first dirstate parent instead.
489 If node1 is None, use the first dirstate parent instead.
491 If node2 is None, compare node1 with working directory.
490 If node2 is None, compare node1 with working directory.
492 """
491 """
493
492
494 def fcmp(fn, mf):
493 def fcmp(fn, mf):
495 t1 = self.wread(fn)
494 t1 = self.wread(fn)
496 t2 = self.file(fn).read(mf.get(fn, nullid))
495 t2 = self.file(fn).read(mf.get(fn, nullid))
497 return cmp(t1, t2)
496 return cmp(t1, t2)
498
497
499 def mfmatches(node):
498 def mfmatches(node):
500 change = self.changelog.read(node)
499 change = self.changelog.read(node)
501 mf = dict(self.manifest.read(change[0]))
500 mf = dict(self.manifest.read(change[0]))
502 for fn in mf.keys():
501 for fn in mf.keys():
503 if not match(fn):
502 if not match(fn):
504 del mf[fn]
503 del mf[fn]
505 return mf
504 return mf
506
505
507 # are we comparing the working directory?
506 # are we comparing the working directory?
508 if not node2:
507 if not node2:
509 if not wlock:
508 if not wlock:
510 try:
509 try:
511 wlock = self.wlock(wait=0)
510 wlock = self.wlock(wait=0)
512 except lock.LockHeld:
511 except lock.LockHeld:
513 wlock = None
512 wlock = None
514 lookup, modified, added, removed, deleted, unknown = (
513 lookup, modified, added, removed, deleted, unknown = (
515 self.dirstate.changes(files, match))
514 self.dirstate.changes(files, match))
516
515
517 # are we comparing working dir against its parent?
516 # are we comparing working dir against its parent?
518 if not node1:
517 if not node1:
519 if lookup:
518 if lookup:
520 # do a full compare of any files that might have changed
519 # do a full compare of any files that might have changed
521 mf2 = mfmatches(self.dirstate.parents()[0])
520 mf2 = mfmatches(self.dirstate.parents()[0])
522 for f in lookup:
521 for f in lookup:
523 if fcmp(f, mf2):
522 if fcmp(f, mf2):
524 modified.append(f)
523 modified.append(f)
525 elif wlock is not None:
524 elif wlock is not None:
526 self.dirstate.update([f], "n")
525 self.dirstate.update([f], "n")
527 else:
526 else:
528 # we are comparing working dir against non-parent
527 # we are comparing working dir against non-parent
529 # generate a pseudo-manifest for the working dir
528 # generate a pseudo-manifest for the working dir
530 mf2 = mfmatches(self.dirstate.parents()[0])
529 mf2 = mfmatches(self.dirstate.parents()[0])
531 for f in lookup + modified + added:
530 for f in lookup + modified + added:
532 mf2[f] = ""
531 mf2[f] = ""
533 for f in removed:
532 for f in removed:
534 if f in mf2:
533 if f in mf2:
535 del mf2[f]
534 del mf2[f]
536 else:
535 else:
537 # we are comparing two revisions
536 # we are comparing two revisions
538 deleted, unknown = [], []
537 deleted, unknown = [], []
539 mf2 = mfmatches(node2)
538 mf2 = mfmatches(node2)
540
539
541 if node1:
540 if node1:
542 # flush lists from dirstate before comparing manifests
541 # flush lists from dirstate before comparing manifests
543 modified, added = [], []
542 modified, added = [], []
544
543
545 mf1 = mfmatches(node1)
544 mf1 = mfmatches(node1)
546
545
547 for fn in mf2:
546 for fn in mf2:
548 if mf1.has_key(fn):
547 if mf1.has_key(fn):
549 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
548 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
550 modified.append(fn)
549 modified.append(fn)
551 del mf1[fn]
550 del mf1[fn]
552 else:
551 else:
553 added.append(fn)
552 added.append(fn)
554
553
555 removed = mf1.keys()
554 removed = mf1.keys()
556
555
557 # sort and return results:
556 # sort and return results:
558 for l in modified, added, removed, deleted, unknown:
557 for l in modified, added, removed, deleted, unknown:
559 l.sort()
558 l.sort()
560 return (modified, added, removed, deleted, unknown)
559 return (modified, added, removed, deleted, unknown)
561
560
562 def add(self, list, wlock=None):
561 def add(self, list, wlock=None):
563 if not wlock:
562 if not wlock:
564 wlock = self.wlock()
563 wlock = self.wlock()
565 for f in list:
564 for f in list:
566 p = self.wjoin(f)
565 p = self.wjoin(f)
567 if not os.path.exists(p):
566 if not os.path.exists(p):
568 self.ui.warn(_("%s does not exist!\n") % f)
567 self.ui.warn(_("%s does not exist!\n") % f)
569 elif not os.path.isfile(p):
568 elif not os.path.isfile(p):
570 self.ui.warn(_("%s not added: only files supported currently\n")
569 self.ui.warn(_("%s not added: only files supported currently\n")
571 % f)
570 % f)
572 elif self.dirstate.state(f) in 'an':
571 elif self.dirstate.state(f) in 'an':
573 self.ui.warn(_("%s already tracked!\n") % f)
572 self.ui.warn(_("%s already tracked!\n") % f)
574 else:
573 else:
575 self.dirstate.update([f], "a")
574 self.dirstate.update([f], "a")
576
575
577 def forget(self, list, wlock=None):
576 def forget(self, list, wlock=None):
578 if not wlock:
577 if not wlock:
579 wlock = self.wlock()
578 wlock = self.wlock()
580 for f in list:
579 for f in list:
581 if self.dirstate.state(f) not in 'ai':
580 if self.dirstate.state(f) not in 'ai':
582 self.ui.warn(_("%s not added!\n") % f)
581 self.ui.warn(_("%s not added!\n") % f)
583 else:
582 else:
584 self.dirstate.forget([f])
583 self.dirstate.forget([f])
585
584
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink, delete them from the
        working directory first.

        Files that still exist, were never committed, or are untracked
        only produce a warning.
        """
        if unlink:
            # delete from the working dir before taking the wlock
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop it from the dirstate
                self.ui.warn(_("%s never committed!\n") % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
607
606
608 def undelete(self, list, wlock=None):
607 def undelete(self, list, wlock=None):
609 p = self.dirstate.parents()[0]
608 p = self.dirstate.parents()[0]
610 mn = self.changelog.read(p)[0]
609 mn = self.changelog.read(p)[0]
611 mf = self.manifest.readflags(mn)
610 mf = self.manifest.readflags(mn)
612 m = self.manifest.read(mn)
611 m = self.manifest.read(mn)
613 if not wlock:
612 if not wlock:
614 wlock = self.wlock()
613 wlock = self.wlock()
615 for f in list:
614 for f in list:
616 if self.dirstate.state(f) not in "r":
615 if self.dirstate.state(f) not in "r":
617 self.ui.warn("%s not removed!\n" % f)
616 self.ui.warn("%s not removed!\n" % f)
618 else:
617 else:
619 t = self.file(f).read(m[f])
618 t = self.file(f).read(m[f])
620 self.wwrite(f, t)
619 self.wwrite(f, t)
621 util.set_exec(self.wjoin(f), mf[f])
620 util.set_exec(self.wjoin(f), mf[f])
622 self.dirstate.update([f], "n")
621 self.dirstate.update([f], "n")
623
622
624 def copy(self, source, dest, wlock=None):
623 def copy(self, source, dest, wlock=None):
625 p = self.wjoin(dest)
624 p = self.wjoin(dest)
626 if not os.path.exists(p):
625 if not os.path.exists(p):
627 self.ui.warn(_("%s does not exist!\n") % dest)
626 self.ui.warn(_("%s does not exist!\n") % dest)
628 elif not os.path.isfile(p):
627 elif not os.path.isfile(p):
629 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
628 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
630 else:
629 else:
631 if not wlock:
630 if not wlock:
632 wlock = self.wlock()
631 wlock = self.wlock()
633 if self.dirstate.state(dest) == '?':
632 if self.dirstate.state(dest) == '?':
634 self.dirstate.update([dest], "a")
633 self.dirstate.update([dest], "a")
635 self.dirstate.copy(source, dest)
634 self.dirstate.copy(source, dest)
636
635
637 def heads(self, start=None):
636 def heads(self, start=None):
638 heads = self.changelog.heads(start)
637 heads = self.changelog.heads(start)
639 # sort the output in rev descending order
638 # sort the output in rev descending order
640 heads = [(-self.changelog.rev(h), h) for h in heads]
639 heads = [(-self.changelog.rev(h), h) for h in heads]
641 heads.sort()
640 heads.sort()
642 return [n for (r, n) in heads]
641 return [n for (r, n) in heads]
643
642
644 # branchlookup returns a dict giving a list of branches for
643 # branchlookup returns a dict giving a list of branches for
645 # each head. A branch is defined as the tag of a node or
644 # each head. A branch is defined as the tag of a node or
646 # the branch of the node's parents. If a node has multiple
645 # the branch of the node's parents. If a node has multiple
647 # branch tags, tags are eliminated if they are visible from other
646 # branch tags, tags are eliminated if they are visible from other
648 # branch tags.
647 # branch tags.
649 #
648 #
650 # So, for this graph: a->b->c->d->e
649 # So, for this graph: a->b->c->d->e
651 # \ /
650 # \ /
652 # aa -----/
651 # aa -----/
653 # a has tag 2.6.12
652 # a has tag 2.6.12
654 # d has tag 2.6.13
653 # d has tag 2.6.13
655 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
654 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
656 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
655 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
657 # from the list.
656 # from the list.
658 #
657 #
659 # It is possible that more than one head will have the same branch tag.
658 # It is possible that more than one head will have the same branch tag.
660 # callers need to check the result for multiple heads under the same
659 # callers need to check the result for multiple heads under the same
661 # branch tag if that is a problem for them (ie checkout of a specific
660 # branch tag if that is a problem for them (ie checkout of a specific
662 # branch).
661 # branch).
663 #
662 #
664 # passing in a specific branch will limit the depth of the search
663 # passing in a specific branch will limit the depth of the search
665 # through the parents. It won't limit the branches returned in the
664 # through the parents. It won't limit the branches returned in the
666 # result though.
665 # result though.
667 def branchlookup(self, heads=None, branch=None):
666 def branchlookup(self, heads=None, branch=None):
668 if not heads:
667 if not heads:
669 heads = self.heads()
668 heads = self.heads()
670 headt = [ h for h in heads ]
669 headt = [ h for h in heads ]
671 chlog = self.changelog
670 chlog = self.changelog
672 branches = {}
671 branches = {}
673 merges = []
672 merges = []
674 seenmerge = {}
673 seenmerge = {}
675
674
676 # traverse the tree once for each head, recording in the branches
675 # traverse the tree once for each head, recording in the branches
677 # dict which tags are visible from this head. The branches
676 # dict which tags are visible from this head. The branches
678 # dict also records which tags are visible from each tag
677 # dict also records which tags are visible from each tag
679 # while we traverse.
678 # while we traverse.
680 while headt or merges:
679 while headt or merges:
681 if merges:
680 if merges:
682 n, found = merges.pop()
681 n, found = merges.pop()
683 visit = [n]
682 visit = [n]
684 else:
683 else:
685 h = headt.pop()
684 h = headt.pop()
686 visit = [h]
685 visit = [h]
687 found = [h]
686 found = [h]
688 seen = {}
687 seen = {}
689 while visit:
688 while visit:
690 n = visit.pop()
689 n = visit.pop()
691 if n in seen:
690 if n in seen:
692 continue
691 continue
693 pp = chlog.parents(n)
692 pp = chlog.parents(n)
694 tags = self.nodetags(n)
693 tags = self.nodetags(n)
695 if tags:
694 if tags:
696 for x in tags:
695 for x in tags:
697 if x == 'tip':
696 if x == 'tip':
698 continue
697 continue
699 for f in found:
698 for f in found:
700 branches.setdefault(f, {})[n] = 1
699 branches.setdefault(f, {})[n] = 1
701 branches.setdefault(n, {})[n] = 1
700 branches.setdefault(n, {})[n] = 1
702 break
701 break
703 if n not in found:
702 if n not in found:
704 found.append(n)
703 found.append(n)
705 if branch in tags:
704 if branch in tags:
706 continue
705 continue
707 seen[n] = 1
706 seen[n] = 1
708 if pp[1] != nullid and n not in seenmerge:
707 if pp[1] != nullid and n not in seenmerge:
709 merges.append((pp[1], [x for x in found]))
708 merges.append((pp[1], [x for x in found]))
710 seenmerge[n] = 1
709 seenmerge[n] = 1
711 if pp[0] != nullid:
710 if pp[0] != nullid:
712 visit.append(pp[0])
711 visit.append(pp[0])
713 # traverse the branches dict, eliminating branch tags from each
712 # traverse the branches dict, eliminating branch tags from each
714 # head that are visible from another branch tag for that head.
713 # head that are visible from another branch tag for that head.
715 out = {}
714 out = {}
716 viscache = {}
715 viscache = {}
717 for h in heads:
716 for h in heads:
718 def visible(node):
717 def visible(node):
719 if node in viscache:
718 if node in viscache:
720 return viscache[node]
719 return viscache[node]
721 ret = {}
720 ret = {}
722 visit = [node]
721 visit = [node]
723 while visit:
722 while visit:
724 x = visit.pop()
723 x = visit.pop()
725 if x in viscache:
724 if x in viscache:
726 ret.update(viscache[x])
725 ret.update(viscache[x])
727 elif x not in ret:
726 elif x not in ret:
728 ret[x] = 1
727 ret[x] = 1
729 if x in branches:
728 if x in branches:
730 visit[len(visit):] = branches[x].keys()
729 visit[len(visit):] = branches[x].keys()
731 viscache[node] = ret
730 viscache[node] = ret
732 return ret
731 return ret
733 if h not in branches:
732 if h not in branches:
734 continue
733 continue
735 # O(n^2), but somewhat limited. This only searches the
734 # O(n^2), but somewhat limited. This only searches the
736 # tags visible from a specific head, not all the tags in the
735 # tags visible from a specific head, not all the tags in the
737 # whole repo.
736 # whole repo.
738 for b in branches[h]:
737 for b in branches[h]:
739 vis = False
738 vis = False
740 for bb in branches[h].keys():
739 for bb in branches[h].keys():
741 if b != bb:
740 if b != bb:
742 if b in visible(bb):
741 if b in visible(bb):
743 vis = True
742 vis = True
744 break
743 break
745 if not vis:
744 if not vis:
746 l = out.setdefault(h, [])
745 l = out.setdefault(h, [])
747 l[len(l):] = self.nodetags(b)
746 l[len(l):] = self.nodetags(b)
748 return out
747 return out
749
748
750 def branches(self, nodes):
749 def branches(self, nodes):
751 if not nodes:
750 if not nodes:
752 nodes = [self.changelog.tip()]
751 nodes = [self.changelog.tip()]
753 b = []
752 b = []
754 for n in nodes:
753 for n in nodes:
755 t = n
754 t = n
756 while n:
755 while n:
757 p = self.changelog.parents(n)
756 p = self.changelog.parents(n)
758 if p[1] != nullid or p[0] == nullid:
757 if p[1] != nullid or p[0] == nullid:
759 b.append((t, n, p[0], p[1]))
758 b.append((t, n, p[0], p[1]))
760 break
759 break
761 n = p[0]
760 n = p[0]
762 return b
761 return b
763
762
764 def between(self, pairs):
763 def between(self, pairs):
765 r = []
764 r = []
766
765
767 for top, bottom in pairs:
766 for top, bottom in pairs:
768 n, l, i = top, [], 0
767 n, l, i = top, [], 0
769 f = 1
768 f = 1
770
769
771 while n != bottom:
770 while n != bottom:
772 p = self.changelog.parents(n)[0]
771 p = self.changelog.parents(n)[0]
773 if i == f:
772 if i == f:
774 l.append(n)
773 l.append(n)
775 f = f * 2
774 f = f * 2
776 n = p
775 n = p
777 i += 1
776 i += 1
778
777
779 r.append(l)
778 r.append(l)
780
779
781 return r
780 return r
782
781
783 def findincoming(self, remote, base=None, heads=None):
782 def findincoming(self, remote, base=None, heads=None):
784 m = self.changelog.nodemap
783 m = self.changelog.nodemap
785 search = []
784 search = []
786 fetch = {}
785 fetch = {}
787 seen = {}
786 seen = {}
788 seenbranch = {}
787 seenbranch = {}
789 if base == None:
788 if base == None:
790 base = {}
789 base = {}
791
790
792 # assume we're closer to the tip than the root
791 # assume we're closer to the tip than the root
793 # and start by examining the heads
792 # and start by examining the heads
794 self.ui.status(_("searching for changes\n"))
793 self.ui.status(_("searching for changes\n"))
795
794
796 if not heads:
795 if not heads:
797 heads = remote.heads()
796 heads = remote.heads()
798
797
799 unknown = []
798 unknown = []
800 for h in heads:
799 for h in heads:
801 if h not in m:
800 if h not in m:
802 unknown.append(h)
801 unknown.append(h)
803 else:
802 else:
804 base[h] = 1
803 base[h] = 1
805
804
806 if not unknown:
805 if not unknown:
807 return None
806 return None
808
807
809 rep = {}
808 rep = {}
810 reqcnt = 0
809 reqcnt = 0
811
810
812 # search through remote branches
811 # search through remote branches
813 # a 'branch' here is a linear segment of history, with four parts:
812 # a 'branch' here is a linear segment of history, with four parts:
814 # head, root, first parent, second parent
813 # head, root, first parent, second parent
815 # (a branch always has two parents (or none) by definition)
814 # (a branch always has two parents (or none) by definition)
816 unknown = remote.branches(unknown)
815 unknown = remote.branches(unknown)
817 while unknown:
816 while unknown:
818 r = []
817 r = []
819 while unknown:
818 while unknown:
820 n = unknown.pop(0)
819 n = unknown.pop(0)
821 if n[0] in seen:
820 if n[0] in seen:
822 continue
821 continue
823
822
824 self.ui.debug(_("examining %s:%s\n")
823 self.ui.debug(_("examining %s:%s\n")
825 % (short(n[0]), short(n[1])))
824 % (short(n[0]), short(n[1])))
826 if n[0] == nullid:
825 if n[0] == nullid:
827 break
826 break
828 if n in seenbranch:
827 if n in seenbranch:
829 self.ui.debug(_("branch already found\n"))
828 self.ui.debug(_("branch already found\n"))
830 continue
829 continue
831 if n[1] and n[1] in m: # do we know the base?
830 if n[1] and n[1] in m: # do we know the base?
832 self.ui.debug(_("found incomplete branch %s:%s\n")
831 self.ui.debug(_("found incomplete branch %s:%s\n")
833 % (short(n[0]), short(n[1])))
832 % (short(n[0]), short(n[1])))
834 search.append(n) # schedule branch range for scanning
833 search.append(n) # schedule branch range for scanning
835 seenbranch[n] = 1
834 seenbranch[n] = 1
836 else:
835 else:
837 if n[1] not in seen and n[1] not in fetch:
836 if n[1] not in seen and n[1] not in fetch:
838 if n[2] in m and n[3] in m:
837 if n[2] in m and n[3] in m:
839 self.ui.debug(_("found new changeset %s\n") %
838 self.ui.debug(_("found new changeset %s\n") %
840 short(n[1]))
839 short(n[1]))
841 fetch[n[1]] = 1 # earliest unknown
840 fetch[n[1]] = 1 # earliest unknown
842 base[n[2]] = 1 # latest known
841 base[n[2]] = 1 # latest known
843 continue
842 continue
844
843
845 for a in n[2:4]:
844 for a in n[2:4]:
846 if a not in rep:
845 if a not in rep:
847 r.append(a)
846 r.append(a)
848 rep[a] = 1
847 rep[a] = 1
849
848
850 seen[n[0]] = 1
849 seen[n[0]] = 1
851
850
852 if r:
851 if r:
853 reqcnt += 1
852 reqcnt += 1
854 self.ui.debug(_("request %d: %s\n") %
853 self.ui.debug(_("request %d: %s\n") %
855 (reqcnt, " ".join(map(short, r))))
854 (reqcnt, " ".join(map(short, r))))
856 for p in range(0, len(r), 10):
855 for p in range(0, len(r), 10):
857 for b in remote.branches(r[p:p+10]):
856 for b in remote.branches(r[p:p+10]):
858 self.ui.debug(_("received %s:%s\n") %
857 self.ui.debug(_("received %s:%s\n") %
859 (short(b[0]), short(b[1])))
858 (short(b[0]), short(b[1])))
860 if b[0] in m:
859 if b[0] in m:
861 self.ui.debug(_("found base node %s\n")
860 self.ui.debug(_("found base node %s\n")
862 % short(b[0]))
861 % short(b[0]))
863 base[b[0]] = 1
862 base[b[0]] = 1
864 elif b[0] not in seen:
863 elif b[0] not in seen:
865 unknown.append(b)
864 unknown.append(b)
866
865
867 # do binary search on the branches we found
866 # do binary search on the branches we found
868 while search:
867 while search:
869 n = search.pop(0)
868 n = search.pop(0)
870 reqcnt += 1
869 reqcnt += 1
871 l = remote.between([(n[0], n[1])])[0]
870 l = remote.between([(n[0], n[1])])[0]
872 l.append(n[1])
871 l.append(n[1])
873 p = n[0]
872 p = n[0]
874 f = 1
873 f = 1
875 for i in l:
874 for i in l:
876 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
875 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
877 if i in m:
876 if i in m:
878 if f <= 2:
877 if f <= 2:
879 self.ui.debug(_("found new branch changeset %s\n") %
878 self.ui.debug(_("found new branch changeset %s\n") %
880 short(p))
879 short(p))
881 fetch[p] = 1
880 fetch[p] = 1
882 base[i] = 1
881 base[i] = 1
883 else:
882 else:
884 self.ui.debug(_("narrowed branch search to %s:%s\n")
883 self.ui.debug(_("narrowed branch search to %s:%s\n")
885 % (short(p), short(i)))
884 % (short(p), short(i)))
886 search.append((p, i))
885 search.append((p, i))
887 break
886 break
888 p, f = i, f * 2
887 p, f = i, f * 2
889
888
890 # sanity check our fetch list
889 # sanity check our fetch list
891 for f in fetch.keys():
890 for f in fetch.keys():
892 if f in m:
891 if f in m:
893 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
892 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
894
893
895 if base.keys() == [nullid]:
894 if base.keys() == [nullid]:
896 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
895 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
897
896
898 self.ui.note(_("found new changesets starting at ") +
897 self.ui.note(_("found new changesets starting at ") +
899 " ".join([short(f) for f in fetch]) + "\n")
898 " ".join([short(f) for f in fetch]) + "\n")
900
899
901 self.ui.debug(_("%d total queries\n") % reqcnt)
900 self.ui.debug(_("%d total queries\n") % reqcnt)
902
901
903 return fetch.keys()
902 return fetch.keys()
904
903
905 def findoutgoing(self, remote, base=None, heads=None):
904 def findoutgoing(self, remote, base=None, heads=None):
906 if base == None:
905 if base == None:
907 base = {}
906 base = {}
908 self.findincoming(remote, base, heads)
907 self.findincoming(remote, base, heads)
909
908
910 self.ui.debug(_("common changesets up to ")
909 self.ui.debug(_("common changesets up to ")
911 + " ".join(map(short, base.keys())) + "\n")
910 + " ".join(map(short, base.keys())) + "\n")
912
911
913 remain = dict.fromkeys(self.changelog.nodemap)
912 remain = dict.fromkeys(self.changelog.nodemap)
914
913
915 # prune everything remote has from the tree
914 # prune everything remote has from the tree
916 del remain[nullid]
915 del remain[nullid]
917 remove = base.keys()
916 remove = base.keys()
918 while remove:
917 while remove:
919 n = remove.pop(0)
918 n = remove.pop(0)
920 if n in remain:
919 if n in remain:
921 del remain[n]
920 del remain[n]
922 for p in self.changelog.parents(n):
921 for p in self.changelog.parents(n):
923 remove.append(p)
922 remove.append(p)
924
923
925 # find every node whose parents have been pruned
924 # find every node whose parents have been pruned
926 subset = []
925 subset = []
927 for n in remain:
926 for n in remain:
928 p1, p2 = self.changelog.parents(n)
927 p1, p2 = self.changelog.parents(n)
929 if p1 not in remain and p2 not in remain:
928 if p1 not in remain and p2 not in remain:
930 subset.append(n)
929 subset.append(n)
931
930
932 # this is the set of all roots we have to push
931 # this is the set of all roots we have to push
933 return subset
932 return subset
934
933
935 def pull(self, remote, heads=None):
934 def pull(self, remote, heads=None):
936 lock = self.lock()
935 lock = self.lock()
937
936
938 # if we have an empty repo, fetch everything
937 # if we have an empty repo, fetch everything
939 if self.changelog.tip() == nullid:
938 if self.changelog.tip() == nullid:
940 self.ui.status(_("requesting all changes\n"))
939 self.ui.status(_("requesting all changes\n"))
941 fetch = [nullid]
940 fetch = [nullid]
942 else:
941 else:
943 fetch = self.findincoming(remote)
942 fetch = self.findincoming(remote)
944
943
945 if not fetch:
944 if not fetch:
946 self.ui.status(_("no changes found\n"))
945 self.ui.status(_("no changes found\n"))
947 return 1
946 return 1
948
947
949 if heads is None:
948 if heads is None:
950 cg = remote.changegroup(fetch)
949 cg = remote.changegroup(fetch)
951 else:
950 else:
952 cg = remote.changegroupsubset(fetch, heads)
951 cg = remote.changegroupsubset(fetch, heads)
953 return self.addchangegroup(cg)
952 return self.addchangegroup(cg)
954
953
955 def push(self, remote, force=False):
954 def push(self, remote, force=False):
956 lock = remote.lock()
955 lock = remote.lock()
957
956
958 base = {}
957 base = {}
959 heads = remote.heads()
958 heads = remote.heads()
960 inc = self.findincoming(remote, base, heads)
959 inc = self.findincoming(remote, base, heads)
961 if not force and inc:
960 if not force and inc:
962 self.ui.warn(_("abort: unsynced remote changes!\n"))
961 self.ui.warn(_("abort: unsynced remote changes!\n"))
963 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
962 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
964 return 1
963 return 1
965
964
966 update = self.findoutgoing(remote, base)
965 update = self.findoutgoing(remote, base)
967 if not update:
966 if not update:
968 self.ui.status(_("no changes found\n"))
967 self.ui.status(_("no changes found\n"))
969 return 1
968 return 1
970 elif not force:
969 elif not force:
971 if len(heads) < len(self.changelog.heads()):
970 if len(heads) < len(self.changelog.heads()):
972 self.ui.warn(_("abort: push creates new remote branches!\n"))
971 self.ui.warn(_("abort: push creates new remote branches!\n"))
973 self.ui.status(_("(did you forget to merge?"
972 self.ui.status(_("(did you forget to merge?"
974 " use push -f to force)\n"))
973 " use push -f to force)\n"))
975 return 1
974 return 1
976
975
977 cg = self.changegroup(update)
976 cg = self.changegroup(update)
978 return remote.addchangegroup(cg)
977 return remote.addchangegroup(cg)
979
978
980 def changegroupsubset(self, bases, heads):
979 def changegroupsubset(self, bases, heads):
981 """This function generates a changegroup consisting of all the nodes
980 """This function generates a changegroup consisting of all the nodes
982 that are descendents of any of the bases, and ancestors of any of
981 that are descendents of any of the bases, and ancestors of any of
983 the heads.
982 the heads.
984
983
985 It is fairly complex as determining which filenodes and which
984 It is fairly complex as determining which filenodes and which
986 manifest nodes need to be included for the changeset to be complete
985 manifest nodes need to be included for the changeset to be complete
987 is non-trivial.
986 is non-trivial.
988
987
989 Another wrinkle is doing the reverse, figuring out which changeset in
988 Another wrinkle is doing the reverse, figuring out which changeset in
990 the changegroup a particular filenode or manifestnode belongs to."""
989 the changegroup a particular filenode or manifestnode belongs to."""
991
990
992 # Set up some initial variables
991 # Set up some initial variables
993 # Make it easy to refer to self.changelog
992 # Make it easy to refer to self.changelog
994 cl = self.changelog
993 cl = self.changelog
995 # msng is short for missing - compute the list of changesets in this
994 # msng is short for missing - compute the list of changesets in this
996 # changegroup.
995 # changegroup.
997 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
996 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
998 # Some bases may turn out to be superfluous, and some heads may be
997 # Some bases may turn out to be superfluous, and some heads may be
999 # too. nodesbetween will return the minimal set of bases and heads
998 # too. nodesbetween will return the minimal set of bases and heads
1000 # necessary to re-create the changegroup.
999 # necessary to re-create the changegroup.
1001
1000
1002 # Known heads are the list of heads that it is assumed the recipient
1001 # Known heads are the list of heads that it is assumed the recipient
1003 # of this changegroup will know about.
1002 # of this changegroup will know about.
1004 knownheads = {}
1003 knownheads = {}
1005 # We assume that all parents of bases are known heads.
1004 # We assume that all parents of bases are known heads.
1006 for n in bases:
1005 for n in bases:
1007 for p in cl.parents(n):
1006 for p in cl.parents(n):
1008 if p != nullid:
1007 if p != nullid:
1009 knownheads[p] = 1
1008 knownheads[p] = 1
1010 knownheads = knownheads.keys()
1009 knownheads = knownheads.keys()
1011 if knownheads:
1010 if knownheads:
1012 # Now that we know what heads are known, we can compute which
1011 # Now that we know what heads are known, we can compute which
1013 # changesets are known. The recipient must know about all
1012 # changesets are known. The recipient must know about all
1014 # changesets required to reach the known heads from the null
1013 # changesets required to reach the known heads from the null
1015 # changeset.
1014 # changeset.
1016 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1015 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1017 junk = None
1016 junk = None
1018 # Transform the list into an ersatz set.
1017 # Transform the list into an ersatz set.
1019 has_cl_set = dict.fromkeys(has_cl_set)
1018 has_cl_set = dict.fromkeys(has_cl_set)
1020 else:
1019 else:
1021 # If there were no known heads, the recipient cannot be assumed to
1020 # If there were no known heads, the recipient cannot be assumed to
1022 # know about any changesets.
1021 # know about any changesets.
1023 has_cl_set = {}
1022 has_cl_set = {}
1024
1023
1025 # Make it easy to refer to self.manifest
1024 # Make it easy to refer to self.manifest
1026 mnfst = self.manifest
1025 mnfst = self.manifest
1027 # We don't know which manifests are missing yet
1026 # We don't know which manifests are missing yet
1028 msng_mnfst_set = {}
1027 msng_mnfst_set = {}
1029 # Nor do we know which filenodes are missing.
1028 # Nor do we know which filenodes are missing.
1030 msng_filenode_set = {}
1029 msng_filenode_set = {}
1031
1030
1032 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1031 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1033 junk = None
1032 junk = None
1034
1033
1035 # A changeset always belongs to itself, so the changenode lookup
1034 # A changeset always belongs to itself, so the changenode lookup
1036 # function for a changenode is identity.
1035 # function for a changenode is identity.
1037 def identity(x):
1036 def identity(x):
1038 return x
1037 return x
1039
1038
1040 # A function generating function. Sets up an environment for the
1039 # A function generating function. Sets up an environment for the
1041 # inner function.
1040 # inner function.
1042 def cmp_by_rev_func(revlog):
1041 def cmp_by_rev_func(revlog):
1043 # Compare two nodes by their revision number in the environment's
1042 # Compare two nodes by their revision number in the environment's
1044 # revision history. Since the revision number both represents the
1043 # revision history. Since the revision number both represents the
1045 # most efficient order to read the nodes in, and represents a
1044 # most efficient order to read the nodes in, and represents a
1046 # topological sorting of the nodes, this function is often useful.
1045 # topological sorting of the nodes, this function is often useful.
1047 def cmp_by_rev(a, b):
1046 def cmp_by_rev(a, b):
1048 return cmp(revlog.rev(a), revlog.rev(b))
1047 return cmp(revlog.rev(a), revlog.rev(b))
1049 return cmp_by_rev
1048 return cmp_by_rev
1050
1049
1051 # If we determine that a particular file or manifest node must be a
1050 # If we determine that a particular file or manifest node must be a
1052 # node that the recipient of the changegroup will already have, we can
1051 # node that the recipient of the changegroup will already have, we can
1053 # also assume the recipient will have all the parents. This function
1052 # also assume the recipient will have all the parents. This function
1054 # prunes them from the set of missing nodes.
1053 # prunes them from the set of missing nodes.
1055 def prune_parents(revlog, hasset, msngset):
1054 def prune_parents(revlog, hasset, msngset):
1056 haslst = hasset.keys()
1055 haslst = hasset.keys()
1057 haslst.sort(cmp_by_rev_func(revlog))
1056 haslst.sort(cmp_by_rev_func(revlog))
1058 for node in haslst:
1057 for node in haslst:
1059 parentlst = [p for p in revlog.parents(node) if p != nullid]
1058 parentlst = [p for p in revlog.parents(node) if p != nullid]
1060 while parentlst:
1059 while parentlst:
1061 n = parentlst.pop()
1060 n = parentlst.pop()
1062 if n not in hasset:
1061 if n not in hasset:
1063 hasset[n] = 1
1062 hasset[n] = 1
1064 p = [p for p in revlog.parents(n) if p != nullid]
1063 p = [p for p in revlog.parents(n) if p != nullid]
1065 parentlst.extend(p)
1064 parentlst.extend(p)
1066 for n in hasset:
1065 for n in hasset:
1067 msngset.pop(n, None)
1066 msngset.pop(n, None)
1068
1067
1069 # This is a function generating function used to set up an environment
1068 # This is a function generating function used to set up an environment
1070 # for the inner function to execute in.
1069 # for the inner function to execute in.
1071 def manifest_and_file_collector(changedfileset):
1070 def manifest_and_file_collector(changedfileset):
1072 # This is an information gathering function that gathers
1071 # This is an information gathering function that gathers
1073 # information from each changeset node that goes out as part of
1072 # information from each changeset node that goes out as part of
1074 # the changegroup. The information gathered is a list of which
1073 # the changegroup. The information gathered is a list of which
1075 # manifest nodes are potentially required (the recipient may
1074 # manifest nodes are potentially required (the recipient may
1076 # already have them) and total list of all files which were
1075 # already have them) and total list of all files which were
1077 # changed in any changeset in the changegroup.
1076 # changed in any changeset in the changegroup.
1078 #
1077 #
1079 # We also remember the first changenode we saw any manifest
1078 # We also remember the first changenode we saw any manifest
1080 # referenced by so we can later determine which changenode 'owns'
1079 # referenced by so we can later determine which changenode 'owns'
1081 # the manifest.
1080 # the manifest.
1082 def collect_manifests_and_files(clnode):
1081 def collect_manifests_and_files(clnode):
1083 c = cl.read(clnode)
1082 c = cl.read(clnode)
1084 for f in c[3]:
1083 for f in c[3]:
1085 # This is to make sure we only have one instance of each
1084 # This is to make sure we only have one instance of each
1086 # filename string for each filename.
1085 # filename string for each filename.
1087 changedfileset.setdefault(f, f)
1086 changedfileset.setdefault(f, f)
1088 msng_mnfst_set.setdefault(c[0], clnode)
1087 msng_mnfst_set.setdefault(c[0], clnode)
1089 return collect_manifests_and_files
1088 return collect_manifests_and_files
1090
1089
1091 # Figure out which manifest nodes (of the ones we think might be part
1090 # Figure out which manifest nodes (of the ones we think might be part
1092 # of the changegroup) the recipient must know about and remove them
1091 # of the changegroup) the recipient must know about and remove them
1093 # from the changegroup.
1092 # from the changegroup.
1094 def prune_manifests():
1093 def prune_manifests():
1095 has_mnfst_set = {}
1094 has_mnfst_set = {}
1096 for n in msng_mnfst_set:
1095 for n in msng_mnfst_set:
1097 # If a 'missing' manifest thinks it belongs to a changenode
1096 # If a 'missing' manifest thinks it belongs to a changenode
1098 # the recipient is assumed to have, obviously the recipient
1097 # the recipient is assumed to have, obviously the recipient
1099 # must have that manifest.
1098 # must have that manifest.
1100 linknode = cl.node(mnfst.linkrev(n))
1099 linknode = cl.node(mnfst.linkrev(n))
1101 if linknode in has_cl_set:
1100 if linknode in has_cl_set:
1102 has_mnfst_set[n] = 1
1101 has_mnfst_set[n] = 1
1103 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1102 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1104
1103
1105 # Use the information collected in collect_manifests_and_files to say
1104 # Use the information collected in collect_manifests_and_files to say
1106 # which changenode any manifestnode belongs to.
1105 # which changenode any manifestnode belongs to.
1107 def lookup_manifest_link(mnfstnode):
1106 def lookup_manifest_link(mnfstnode):
1108 return msng_mnfst_set[mnfstnode]
1107 return msng_mnfst_set[mnfstnode]
1109
1108
1110 # A function generating function that sets up the initial environment
1109 # A function generating function that sets up the initial environment
1111 # the inner function.
1110 # the inner function.
1112 def filenode_collector(changedfiles):
1111 def filenode_collector(changedfiles):
1113 next_rev = [0]
1112 next_rev = [0]
1114 # This gathers information from each manifestnode included in the
1113 # This gathers information from each manifestnode included in the
1115 # changegroup about which filenodes the manifest node references
1114 # changegroup about which filenodes the manifest node references
1116 # so we can include those in the changegroup too.
1115 # so we can include those in the changegroup too.
1117 #
1116 #
1118 # It also remembers which changenode each filenode belongs to. It
1117 # It also remembers which changenode each filenode belongs to. It
1119 # does this by assuming the a filenode belongs to the changenode
1118 # does this by assuming the a filenode belongs to the changenode
1120 # the first manifest that references it belongs to.
1119 # the first manifest that references it belongs to.
1121 def collect_msng_filenodes(mnfstnode):
1120 def collect_msng_filenodes(mnfstnode):
1122 r = mnfst.rev(mnfstnode)
1121 r = mnfst.rev(mnfstnode)
1123 if r == next_rev[0]:
1122 if r == next_rev[0]:
1124 # If the last rev we looked at was the one just previous,
1123 # If the last rev we looked at was the one just previous,
1125 # we only need to see a diff.
1124 # we only need to see a diff.
1126 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1125 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1127 # For each line in the delta
1126 # For each line in the delta
1128 for dline in delta.splitlines():
1127 for dline in delta.splitlines():
1129 # get the filename and filenode for that line
1128 # get the filename and filenode for that line
1130 f, fnode = dline.split('\0')
1129 f, fnode = dline.split('\0')
1131 fnode = bin(fnode[:40])
1130 fnode = bin(fnode[:40])
1132 f = changedfiles.get(f, None)
1131 f = changedfiles.get(f, None)
1133 # And if the file is in the list of files we care
1132 # And if the file is in the list of files we care
1134 # about.
1133 # about.
1135 if f is not None:
1134 if f is not None:
1136 # Get the changenode this manifest belongs to
1135 # Get the changenode this manifest belongs to
1137 clnode = msng_mnfst_set[mnfstnode]
1136 clnode = msng_mnfst_set[mnfstnode]
1138 # Create the set of filenodes for the file if
1137 # Create the set of filenodes for the file if
1139 # there isn't one already.
1138 # there isn't one already.
1140 ndset = msng_filenode_set.setdefault(f, {})
1139 ndset = msng_filenode_set.setdefault(f, {})
1141 # And set the filenode's changelog node to the
1140 # And set the filenode's changelog node to the
1142 # manifest's if it hasn't been set already.
1141 # manifest's if it hasn't been set already.
1143 ndset.setdefault(fnode, clnode)
1142 ndset.setdefault(fnode, clnode)
1144 else:
1143 else:
1145 # Otherwise we need a full manifest.
1144 # Otherwise we need a full manifest.
1146 m = mnfst.read(mnfstnode)
1145 m = mnfst.read(mnfstnode)
1147 # For every file in we care about.
1146 # For every file in we care about.
1148 for f in changedfiles:
1147 for f in changedfiles:
1149 fnode = m.get(f, None)
1148 fnode = m.get(f, None)
1150 # If it's in the manifest
1149 # If it's in the manifest
1151 if fnode is not None:
1150 if fnode is not None:
1152 # See comments above.
1151 # See comments above.
1153 clnode = msng_mnfst_set[mnfstnode]
1152 clnode = msng_mnfst_set[mnfstnode]
1154 ndset = msng_filenode_set.setdefault(f, {})
1153 ndset = msng_filenode_set.setdefault(f, {})
1155 ndset.setdefault(fnode, clnode)
1154 ndset.setdefault(fnode, clnode)
1156 # Remember the revision we hope to see next.
1155 # Remember the revision we hope to see next.
1157 next_rev[0] = r + 1
1156 next_rev[0] = r + 1
1158 return collect_msng_filenodes
1157 return collect_msng_filenodes
1159
1158
1160 # We have a list of filenodes we think we need for a file, lets remove
1159 # We have a list of filenodes we think we need for a file, lets remove
1161 # all those we now the recipient must have.
1160 # all those we now the recipient must have.
1162 def prune_filenodes(f, filerevlog):
1161 def prune_filenodes(f, filerevlog):
1163 msngset = msng_filenode_set[f]
1162 msngset = msng_filenode_set[f]
1164 hasset = {}
1163 hasset = {}
1165 # If a 'missing' filenode thinks it belongs to a changenode we
1164 # If a 'missing' filenode thinks it belongs to a changenode we
1166 # assume the recipient must have, then the recipient must have
1165 # assume the recipient must have, then the recipient must have
1167 # that filenode.
1166 # that filenode.
1168 for n in msngset:
1167 for n in msngset:
1169 clnode = cl.node(filerevlog.linkrev(n))
1168 clnode = cl.node(filerevlog.linkrev(n))
1170 if clnode in has_cl_set:
1169 if clnode in has_cl_set:
1171 hasset[n] = 1
1170 hasset[n] = 1
1172 prune_parents(filerevlog, hasset, msngset)
1171 prune_parents(filerevlog, hasset, msngset)
1173
1172
1174 # A function generator function that sets up the a context for the
1173 # A function generator function that sets up the a context for the
1175 # inner function.
1174 # inner function.
1176 def lookup_filenode_link_func(fname):
1175 def lookup_filenode_link_func(fname):
1177 msngset = msng_filenode_set[fname]
1176 msngset = msng_filenode_set[fname]
1178 # Lookup the changenode the filenode belongs to.
1177 # Lookup the changenode the filenode belongs to.
1179 def lookup_filenode_link(fnode):
1178 def lookup_filenode_link(fnode):
1180 return msngset[fnode]
1179 return msngset[fnode]
1181 return lookup_filenode_link
1180 return lookup_filenode_link
1182
1181
1183 # Now that we have all theses utility functions to help out and
1182 # Now that we have all theses utility functions to help out and
1184 # logically divide up the task, generate the group.
1183 # logically divide up the task, generate the group.
1185 def gengroup():
1184 def gengroup():
1186 # The set of changed files starts empty.
1185 # The set of changed files starts empty.
1187 changedfiles = {}
1186 changedfiles = {}
1188 # Create a changenode group generator that will call our functions
1187 # Create a changenode group generator that will call our functions
1189 # back to lookup the owning changenode and collect information.
1188 # back to lookup the owning changenode and collect information.
1190 group = cl.group(msng_cl_lst, identity,
1189 group = cl.group(msng_cl_lst, identity,
1191 manifest_and_file_collector(changedfiles))
1190 manifest_and_file_collector(changedfiles))
1192 for chnk in group:
1191 for chnk in group:
1193 yield chnk
1192 yield chnk
1194
1193
1195 # The list of manifests has been collected by the generator
1194 # The list of manifests has been collected by the generator
1196 # calling our functions back.
1195 # calling our functions back.
1197 prune_manifests()
1196 prune_manifests()
1198 msng_mnfst_lst = msng_mnfst_set.keys()
1197 msng_mnfst_lst = msng_mnfst_set.keys()
1199 # Sort the manifestnodes by revision number.
1198 # Sort the manifestnodes by revision number.
1200 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1199 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1201 # Create a generator for the manifestnodes that calls our lookup
1200 # Create a generator for the manifestnodes that calls our lookup
1202 # and data collection functions back.
1201 # and data collection functions back.
1203 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1202 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1204 filenode_collector(changedfiles))
1203 filenode_collector(changedfiles))
1205 for chnk in group:
1204 for chnk in group:
1206 yield chnk
1205 yield chnk
1207
1206
1208 # These are no longer needed, dereference and toss the memory for
1207 # These are no longer needed, dereference and toss the memory for
1209 # them.
1208 # them.
1210 msng_mnfst_lst = None
1209 msng_mnfst_lst = None
1211 msng_mnfst_set.clear()
1210 msng_mnfst_set.clear()
1212
1211
1213 changedfiles = changedfiles.keys()
1212 changedfiles = changedfiles.keys()
1214 changedfiles.sort()
1213 changedfiles.sort()
1215 # Go through all our files in order sorted by name.
1214 # Go through all our files in order sorted by name.
1216 for fname in changedfiles:
1215 for fname in changedfiles:
1217 filerevlog = self.file(fname)
1216 filerevlog = self.file(fname)
1218 # Toss out the filenodes that the recipient isn't really
1217 # Toss out the filenodes that the recipient isn't really
1219 # missing.
1218 # missing.
1220 if msng_filenode_set.has_key(fname):
1219 if msng_filenode_set.has_key(fname):
1221 prune_filenodes(fname, filerevlog)
1220 prune_filenodes(fname, filerevlog)
1222 msng_filenode_lst = msng_filenode_set[fname].keys()
1221 msng_filenode_lst = msng_filenode_set[fname].keys()
1223 else:
1222 else:
1224 msng_filenode_lst = []
1223 msng_filenode_lst = []
1225 # If any filenodes are left, generate the group for them,
1224 # If any filenodes are left, generate the group for them,
1226 # otherwise don't bother.
1225 # otherwise don't bother.
1227 if len(msng_filenode_lst) > 0:
1226 if len(msng_filenode_lst) > 0:
1228 yield struct.pack(">l", len(fname) + 4) + fname
1227 yield struct.pack(">l", len(fname) + 4) + fname
1229 # Sort the filenodes by their revision #
1228 # Sort the filenodes by their revision #
1230 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1229 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1231 # Create a group generator and only pass in a changenode
1230 # Create a group generator and only pass in a changenode
1232 # lookup function as we need to collect no information
1231 # lookup function as we need to collect no information
1233 # from filenodes.
1232 # from filenodes.
1234 group = filerevlog.group(msng_filenode_lst,
1233 group = filerevlog.group(msng_filenode_lst,
1235 lookup_filenode_link_func(fname))
1234 lookup_filenode_link_func(fname))
1236 for chnk in group:
1235 for chnk in group:
1237 yield chnk
1236 yield chnk
1238 if msng_filenode_set.has_key(fname):
1237 if msng_filenode_set.has_key(fname):
1239 # Don't need this anymore, toss it to free memory.
1238 # Don't need this anymore, toss it to free memory.
1240 del msng_filenode_set[fname]
1239 del msng_filenode_set[fname]
1241 # Signal that no more groups are left.
1240 # Signal that no more groups are left.
1242 yield struct.pack(">l", 0)
1241 yield struct.pack(">l", 0)
1243
1242
1244 return util.chunkbuffer(gengroup())
1243 return util.chunkbuffer(gengroup())
1245
1244
1246 def changegroup(self, basenodes):
1245 def changegroup(self, basenodes):
1247 """Generate a changegroup of all nodes that we have that a recipient
1246 """Generate a changegroup of all nodes that we have that a recipient
1248 doesn't.
1247 doesn't.
1249
1248
1250 This is much easier than the previous function as we can assume that
1249 This is much easier than the previous function as we can assume that
1251 the recipient has any changenode we aren't sending them."""
1250 the recipient has any changenode we aren't sending them."""
1252 cl = self.changelog
1251 cl = self.changelog
1253 nodes = cl.nodesbetween(basenodes, None)[0]
1252 nodes = cl.nodesbetween(basenodes, None)[0]
1254 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1253 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1255
1254
1256 def identity(x):
1255 def identity(x):
1257 return x
1256 return x
1258
1257
1259 def gennodelst(revlog):
1258 def gennodelst(revlog):
1260 for r in xrange(0, revlog.count()):
1259 for r in xrange(0, revlog.count()):
1261 n = revlog.node(r)
1260 n = revlog.node(r)
1262 if revlog.linkrev(n) in revset:
1261 if revlog.linkrev(n) in revset:
1263 yield n
1262 yield n
1264
1263
1265 def changed_file_collector(changedfileset):
1264 def changed_file_collector(changedfileset):
1266 def collect_changed_files(clnode):
1265 def collect_changed_files(clnode):
1267 c = cl.read(clnode)
1266 c = cl.read(clnode)
1268 for fname in c[3]:
1267 for fname in c[3]:
1269 changedfileset[fname] = 1
1268 changedfileset[fname] = 1
1270 return collect_changed_files
1269 return collect_changed_files
1271
1270
1272 def lookuprevlink_func(revlog):
1271 def lookuprevlink_func(revlog):
1273 def lookuprevlink(n):
1272 def lookuprevlink(n):
1274 return cl.node(revlog.linkrev(n))
1273 return cl.node(revlog.linkrev(n))
1275 return lookuprevlink
1274 return lookuprevlink
1276
1275
1277 def gengroup():
1276 def gengroup():
1278 # construct a list of all changed files
1277 # construct a list of all changed files
1279 changedfiles = {}
1278 changedfiles = {}
1280
1279
1281 for chnk in cl.group(nodes, identity,
1280 for chnk in cl.group(nodes, identity,
1282 changed_file_collector(changedfiles)):
1281 changed_file_collector(changedfiles)):
1283 yield chnk
1282 yield chnk
1284 changedfiles = changedfiles.keys()
1283 changedfiles = changedfiles.keys()
1285 changedfiles.sort()
1284 changedfiles.sort()
1286
1285
1287 mnfst = self.manifest
1286 mnfst = self.manifest
1288 nodeiter = gennodelst(mnfst)
1287 nodeiter = gennodelst(mnfst)
1289 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1288 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1290 yield chnk
1289 yield chnk
1291
1290
1292 for fname in changedfiles:
1291 for fname in changedfiles:
1293 filerevlog = self.file(fname)
1292 filerevlog = self.file(fname)
1294 nodeiter = gennodelst(filerevlog)
1293 nodeiter = gennodelst(filerevlog)
1295 nodeiter = list(nodeiter)
1294 nodeiter = list(nodeiter)
1296 if nodeiter:
1295 if nodeiter:
1297 yield struct.pack(">l", len(fname) + 4) + fname
1296 yield struct.pack(">l", len(fname) + 4) + fname
1298 lookup = lookuprevlink_func(filerevlog)
1297 lookup = lookuprevlink_func(filerevlog)
1299 for chnk in filerevlog.group(nodeiter, lookup):
1298 for chnk in filerevlog.group(nodeiter, lookup):
1300 yield chnk
1299 yield chnk
1301
1300
1302 yield struct.pack(">l", 0)
1301 yield struct.pack(">l", 0)
1303
1302
1304 return util.chunkbuffer(gengroup())
1303 return util.chunkbuffer(gengroup())
1305
1304
1306 def addchangegroup(self, source):
1305 def addchangegroup(self, source):
1307
1306
1308 def getchunk():
1307 def getchunk():
1309 d = source.read(4)
1308 d = source.read(4)
1310 if not d:
1309 if not d:
1311 return ""
1310 return ""
1312 l = struct.unpack(">l", d)[0]
1311 l = struct.unpack(">l", d)[0]
1313 if l <= 4:
1312 if l <= 4:
1314 return ""
1313 return ""
1315 d = source.read(l - 4)
1314 d = source.read(l - 4)
1316 if len(d) < l - 4:
1315 if len(d) < l - 4:
1317 raise repo.RepoError(_("premature EOF reading chunk"
1316 raise repo.RepoError(_("premature EOF reading chunk"
1318 " (got %d bytes, expected %d)")
1317 " (got %d bytes, expected %d)")
1319 % (len(d), l - 4))
1318 % (len(d), l - 4))
1320 return d
1319 return d
1321
1320
1322 def getgroup():
1321 def getgroup():
1323 while 1:
1322 while 1:
1324 c = getchunk()
1323 c = getchunk()
1325 if not c:
1324 if not c:
1326 break
1325 break
1327 yield c
1326 yield c
1328
1327
1329 def csmap(x):
1328 def csmap(x):
1330 self.ui.debug(_("add changeset %s\n") % short(x))
1329 self.ui.debug(_("add changeset %s\n") % short(x))
1331 return self.changelog.count()
1330 return self.changelog.count()
1332
1331
1333 def revmap(x):
1332 def revmap(x):
1334 return self.changelog.rev(x)
1333 return self.changelog.rev(x)
1335
1334
1336 if not source:
1335 if not source:
1337 return
1336 return
1338 changesets = files = revisions = 0
1337 changesets = files = revisions = 0
1339
1338
1340 tr = self.transaction()
1339 tr = self.transaction()
1341
1340
1342 oldheads = len(self.changelog.heads())
1341 oldheads = len(self.changelog.heads())
1343
1342
1344 # pull off the changeset group
1343 # pull off the changeset group
1345 self.ui.status(_("adding changesets\n"))
1344 self.ui.status(_("adding changesets\n"))
1346 co = self.changelog.tip()
1345 co = self.changelog.tip()
1347 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1346 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1348 cnr, cor = map(self.changelog.rev, (cn, co))
1347 cnr, cor = map(self.changelog.rev, (cn, co))
1349 if cn == nullid:
1348 if cn == nullid:
1350 cnr = cor
1349 cnr = cor
1351 changesets = cnr - cor
1350 changesets = cnr - cor
1352
1351
1353 # pull off the manifest group
1352 # pull off the manifest group
1354 self.ui.status(_("adding manifests\n"))
1353 self.ui.status(_("adding manifests\n"))
1355 mm = self.manifest.tip()
1354 mm = self.manifest.tip()
1356 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1355 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1357
1356
1358 # process the files
1357 # process the files
1359 self.ui.status(_("adding file changes\n"))
1358 self.ui.status(_("adding file changes\n"))
1360 while 1:
1359 while 1:
1361 f = getchunk()
1360 f = getchunk()
1362 if not f:
1361 if not f:
1363 break
1362 break
1364 self.ui.debug(_("adding %s revisions\n") % f)
1363 self.ui.debug(_("adding %s revisions\n") % f)
1365 fl = self.file(f)
1364 fl = self.file(f)
1366 o = fl.count()
1365 o = fl.count()
1367 n = fl.addgroup(getgroup(), revmap, tr)
1366 n = fl.addgroup(getgroup(), revmap, tr)
1368 revisions += fl.count() - o
1367 revisions += fl.count() - o
1369 files += 1
1368 files += 1
1370
1369
1371 newheads = len(self.changelog.heads())
1370 newheads = len(self.changelog.heads())
1372 heads = ""
1371 heads = ""
1373 if oldheads and newheads > oldheads:
1372 if oldheads and newheads > oldheads:
1374 heads = _(" (+%d heads)") % (newheads - oldheads)
1373 heads = _(" (+%d heads)") % (newheads - oldheads)
1375
1374
1376 self.ui.status(_("added %d changesets"
1375 self.ui.status(_("added %d changesets"
1377 " with %d changes to %d files%s\n")
1376 " with %d changes to %d files%s\n")
1378 % (changesets, revisions, files, heads))
1377 % (changesets, revisions, files, heads))
1379
1378
1380 tr.close()
1379 tr.close()
1381
1380
1382 if changesets > 0:
1381 if changesets > 0:
1383 if not self.hook("changegroup",
1382 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1384 node=hex(self.changelog.node(cor+1))):
1385 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1386 return 1
1387
1383
1388 for i in range(cor + 1, cnr + 1):
1384 for i in range(cor + 1, cnr + 1):
1389 self.hook("incoming", node=hex(self.changelog.node(i)))
1385 self.hook("incoming", node=hex(self.changelog.node(i)))
1390
1386
1391 return
1392
1393 def update(self, node, allow=False, force=False, choose=None,
1387 def update(self, node, allow=False, force=False, choose=None,
1394 moddirstate=True, forcemerge=False, wlock=None):
1388 moddirstate=True, forcemerge=False, wlock=None):
1395 pl = self.dirstate.parents()
1389 pl = self.dirstate.parents()
1396 if not force and pl[1] != nullid:
1390 if not force and pl[1] != nullid:
1397 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1391 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1398 return 1
1392 return 1
1399
1393
1400 err = False
1394 err = False
1401
1395
1402 p1, p2 = pl[0], node
1396 p1, p2 = pl[0], node
1403 pa = self.changelog.ancestor(p1, p2)
1397 pa = self.changelog.ancestor(p1, p2)
1404 m1n = self.changelog.read(p1)[0]
1398 m1n = self.changelog.read(p1)[0]
1405 m2n = self.changelog.read(p2)[0]
1399 m2n = self.changelog.read(p2)[0]
1406 man = self.manifest.ancestor(m1n, m2n)
1400 man = self.manifest.ancestor(m1n, m2n)
1407 m1 = self.manifest.read(m1n)
1401 m1 = self.manifest.read(m1n)
1408 mf1 = self.manifest.readflags(m1n)
1402 mf1 = self.manifest.readflags(m1n)
1409 m2 = self.manifest.read(m2n).copy()
1403 m2 = self.manifest.read(m2n).copy()
1410 mf2 = self.manifest.readflags(m2n)
1404 mf2 = self.manifest.readflags(m2n)
1411 ma = self.manifest.read(man)
1405 ma = self.manifest.read(man)
1412 mfa = self.manifest.readflags(man)
1406 mfa = self.manifest.readflags(man)
1413
1407
1414 modified, added, removed, deleted, unknown = self.changes()
1408 modified, added, removed, deleted, unknown = self.changes()
1415
1409
1416 # is this a jump, or a merge? i.e. is there a linear path
1410 # is this a jump, or a merge? i.e. is there a linear path
1417 # from p1 to p2?
1411 # from p1 to p2?
1418 linear_path = (pa == p1 or pa == p2)
1412 linear_path = (pa == p1 or pa == p2)
1419
1413
1420 if allow and linear_path:
1414 if allow and linear_path:
1421 raise util.Abort(_("there is nothing to merge, "
1415 raise util.Abort(_("there is nothing to merge, "
1422 "just use 'hg update'"))
1416 "just use 'hg update'"))
1423 if allow and not forcemerge:
1417 if allow and not forcemerge:
1424 if modified or added or removed:
1418 if modified or added or removed:
1425 raise util.Abort(_("outstanding uncommited changes"))
1419 raise util.Abort(_("outstanding uncommited changes"))
1426 if not forcemerge and not force:
1420 if not forcemerge and not force:
1427 for f in unknown:
1421 for f in unknown:
1428 if f in m2:
1422 if f in m2:
1429 t1 = self.wread(f)
1423 t1 = self.wread(f)
1430 t2 = self.file(f).read(m2[f])
1424 t2 = self.file(f).read(m2[f])
1431 if cmp(t1, t2) != 0:
1425 if cmp(t1, t2) != 0:
1432 raise util.Abort(_("'%s' already exists in the working"
1426 raise util.Abort(_("'%s' already exists in the working"
1433 " dir and differs from remote") % f)
1427 " dir and differs from remote") % f)
1434
1428
1435 # resolve the manifest to determine which files
1429 # resolve the manifest to determine which files
1436 # we care about merging
1430 # we care about merging
1437 self.ui.note(_("resolving manifests\n"))
1431 self.ui.note(_("resolving manifests\n"))
1438 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1432 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1439 (force, allow, moddirstate, linear_path))
1433 (force, allow, moddirstate, linear_path))
1440 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1434 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1441 (short(man), short(m1n), short(m2n)))
1435 (short(man), short(m1n), short(m2n)))
1442
1436
1443 merge = {}
1437 merge = {}
1444 get = {}
1438 get = {}
1445 remove = []
1439 remove = []
1446
1440
1447 # construct a working dir manifest
1441 # construct a working dir manifest
1448 mw = m1.copy()
1442 mw = m1.copy()
1449 mfw = mf1.copy()
1443 mfw = mf1.copy()
1450 umap = dict.fromkeys(unknown)
1444 umap = dict.fromkeys(unknown)
1451
1445
1452 for f in added + modified + unknown:
1446 for f in added + modified + unknown:
1453 mw[f] = ""
1447 mw[f] = ""
1454 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1448 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1455
1449
1456 if moddirstate and not wlock:
1450 if moddirstate and not wlock:
1457 wlock = self.wlock()
1451 wlock = self.wlock()
1458
1452
1459 for f in deleted + removed:
1453 for f in deleted + removed:
1460 if f in mw:
1454 if f in mw:
1461 del mw[f]
1455 del mw[f]
1462
1456
1463 # If we're jumping between revisions (as opposed to merging),
1457 # If we're jumping between revisions (as opposed to merging),
1464 # and if neither the working directory nor the target rev has
1458 # and if neither the working directory nor the target rev has
1465 # the file, then we need to remove it from the dirstate, to
1459 # the file, then we need to remove it from the dirstate, to
1466 # prevent the dirstate from listing the file when it is no
1460 # prevent the dirstate from listing the file when it is no
1467 # longer in the manifest.
1461 # longer in the manifest.
1468 if moddirstate and linear_path and f not in m2:
1462 if moddirstate and linear_path and f not in m2:
1469 self.dirstate.forget((f,))
1463 self.dirstate.forget((f,))
1470
1464
1471 # Compare manifests
1465 # Compare manifests
1472 for f, n in mw.iteritems():
1466 for f, n in mw.iteritems():
1473 if choose and not choose(f):
1467 if choose and not choose(f):
1474 continue
1468 continue
1475 if f in m2:
1469 if f in m2:
1476 s = 0
1470 s = 0
1477
1471
1478 # is the wfile new since m1, and match m2?
1472 # is the wfile new since m1, and match m2?
1479 if f not in m1:
1473 if f not in m1:
1480 t1 = self.wread(f)
1474 t1 = self.wread(f)
1481 t2 = self.file(f).read(m2[f])
1475 t2 = self.file(f).read(m2[f])
1482 if cmp(t1, t2) == 0:
1476 if cmp(t1, t2) == 0:
1483 n = m2[f]
1477 n = m2[f]
1484 del t1, t2
1478 del t1, t2
1485
1479
1486 # are files different?
1480 # are files different?
1487 if n != m2[f]:
1481 if n != m2[f]:
1488 a = ma.get(f, nullid)
1482 a = ma.get(f, nullid)
1489 # are both different from the ancestor?
1483 # are both different from the ancestor?
1490 if n != a and m2[f] != a:
1484 if n != a and m2[f] != a:
1491 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1485 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1492 # merge executable bits
1486 # merge executable bits
1493 # "if we changed or they changed, change in merge"
1487 # "if we changed or they changed, change in merge"
1494 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1488 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1495 mode = ((a^b) | (a^c)) ^ a
1489 mode = ((a^b) | (a^c)) ^ a
1496 merge[f] = (m1.get(f, nullid), m2[f], mode)
1490 merge[f] = (m1.get(f, nullid), m2[f], mode)
1497 s = 1
1491 s = 1
1498 # are we clobbering?
1492 # are we clobbering?
1499 # is remote's version newer?
1493 # is remote's version newer?
1500 # or are we going back in time?
1494 # or are we going back in time?
1501 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1495 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1502 self.ui.debug(_(" remote %s is newer, get\n") % f)
1496 self.ui.debug(_(" remote %s is newer, get\n") % f)
1503 get[f] = m2[f]
1497 get[f] = m2[f]
1504 s = 1
1498 s = 1
1505 elif f in umap:
1499 elif f in umap:
1506 # this unknown file is the same as the checkout
1500 # this unknown file is the same as the checkout
1507 get[f] = m2[f]
1501 get[f] = m2[f]
1508
1502
1509 if not s and mfw[f] != mf2[f]:
1503 if not s and mfw[f] != mf2[f]:
1510 if force:
1504 if force:
1511 self.ui.debug(_(" updating permissions for %s\n") % f)
1505 self.ui.debug(_(" updating permissions for %s\n") % f)
1512 util.set_exec(self.wjoin(f), mf2[f])
1506 util.set_exec(self.wjoin(f), mf2[f])
1513 else:
1507 else:
1514 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1508 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1515 mode = ((a^b) | (a^c)) ^ a
1509 mode = ((a^b) | (a^c)) ^ a
1516 if mode != b:
1510 if mode != b:
1517 self.ui.debug(_(" updating permissions for %s\n")
1511 self.ui.debug(_(" updating permissions for %s\n")
1518 % f)
1512 % f)
1519 util.set_exec(self.wjoin(f), mode)
1513 util.set_exec(self.wjoin(f), mode)
1520 del m2[f]
1514 del m2[f]
1521 elif f in ma:
1515 elif f in ma:
1522 if n != ma[f]:
1516 if n != ma[f]:
1523 r = _("d")
1517 r = _("d")
1524 if not force and (linear_path or allow):
1518 if not force and (linear_path or allow):
1525 r = self.ui.prompt(
1519 r = self.ui.prompt(
1526 (_(" local changed %s which remote deleted\n") % f) +
1520 (_(" local changed %s which remote deleted\n") % f) +
1527 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1521 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1528 if r == _("d"):
1522 if r == _("d"):
1529 remove.append(f)
1523 remove.append(f)
1530 else:
1524 else:
1531 self.ui.debug(_("other deleted %s\n") % f)
1525 self.ui.debug(_("other deleted %s\n") % f)
1532 remove.append(f) # other deleted it
1526 remove.append(f) # other deleted it
1533 else:
1527 else:
1534 # file is created on branch or in working directory
1528 # file is created on branch or in working directory
1535 if force and f not in umap:
1529 if force and f not in umap:
1536 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1530 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1537 remove.append(f)
1531 remove.append(f)
1538 elif n == m1.get(f, nullid): # same as parent
1532 elif n == m1.get(f, nullid): # same as parent
1539 if p2 == pa: # going backwards?
1533 if p2 == pa: # going backwards?
1540 self.ui.debug(_("remote deleted %s\n") % f)
1534 self.ui.debug(_("remote deleted %s\n") % f)
1541 remove.append(f)
1535 remove.append(f)
1542 else:
1536 else:
1543 self.ui.debug(_("local modified %s, keeping\n") % f)
1537 self.ui.debug(_("local modified %s, keeping\n") % f)
1544 else:
1538 else:
1545 self.ui.debug(_("working dir created %s, keeping\n") % f)
1539 self.ui.debug(_("working dir created %s, keeping\n") % f)
1546
1540
1547 for f, n in m2.iteritems():
1541 for f, n in m2.iteritems():
1548 if choose and not choose(f):
1542 if choose and not choose(f):
1549 continue
1543 continue
1550 if f[0] == "/":
1544 if f[0] == "/":
1551 continue
1545 continue
1552 if f in ma and n != ma[f]:
1546 if f in ma and n != ma[f]:
1553 r = _("k")
1547 r = _("k")
1554 if not force and (linear_path or allow):
1548 if not force and (linear_path or allow):
1555 r = self.ui.prompt(
1549 r = self.ui.prompt(
1556 (_("remote changed %s which local deleted\n") % f) +
1550 (_("remote changed %s which local deleted\n") % f) +
1557 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1551 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1558 if r == _("k"):
1552 if r == _("k"):
1559 get[f] = n
1553 get[f] = n
1560 elif f not in ma:
1554 elif f not in ma:
1561 self.ui.debug(_("remote created %s\n") % f)
1555 self.ui.debug(_("remote created %s\n") % f)
1562 get[f] = n
1556 get[f] = n
1563 else:
1557 else:
1564 if force or p2 == pa: # going backwards?
1558 if force or p2 == pa: # going backwards?
1565 self.ui.debug(_("local deleted %s, recreating\n") % f)
1559 self.ui.debug(_("local deleted %s, recreating\n") % f)
1566 get[f] = n
1560 get[f] = n
1567 else:
1561 else:
1568 self.ui.debug(_("local deleted %s\n") % f)
1562 self.ui.debug(_("local deleted %s\n") % f)
1569
1563
1570 del mw, m1, m2, ma
1564 del mw, m1, m2, ma
1571
1565
1572 if force:
1566 if force:
1573 for f in merge:
1567 for f in merge:
1574 get[f] = merge[f][1]
1568 get[f] = merge[f][1]
1575 merge = {}
1569 merge = {}
1576
1570
1577 if linear_path or force:
1571 if linear_path or force:
1578 # we don't need to do any magic, just jump to the new rev
1572 # we don't need to do any magic, just jump to the new rev
1579 branch_merge = False
1573 branch_merge = False
1580 p1, p2 = p2, nullid
1574 p1, p2 = p2, nullid
1581 else:
1575 else:
1582 if not allow:
1576 if not allow:
1583 self.ui.status(_("this update spans a branch"
1577 self.ui.status(_("this update spans a branch"
1584 " affecting the following files:\n"))
1578 " affecting the following files:\n"))
1585 fl = merge.keys() + get.keys()
1579 fl = merge.keys() + get.keys()
1586 fl.sort()
1580 fl.sort()
1587 for f in fl:
1581 for f in fl:
1588 cf = ""
1582 cf = ""
1589 if f in merge:
1583 if f in merge:
1590 cf = _(" (resolve)")
1584 cf = _(" (resolve)")
1591 self.ui.status(" %s%s\n" % (f, cf))
1585 self.ui.status(" %s%s\n" % (f, cf))
1592 self.ui.warn(_("aborting update spanning branches!\n"))
1586 self.ui.warn(_("aborting update spanning branches!\n"))
1593 self.ui.status(_("(use update -m to merge across branches"
1587 self.ui.status(_("(use update -m to merge across branches"
1594 " or -C to lose changes)\n"))
1588 " or -C to lose changes)\n"))
1595 return 1
1589 return 1
1596 branch_merge = True
1590 branch_merge = True
1597
1591
1598 # get the files we don't need to change
1592 # get the files we don't need to change
1599 files = get.keys()
1593 files = get.keys()
1600 files.sort()
1594 files.sort()
1601 for f in files:
1595 for f in files:
1602 if f[0] == "/":
1596 if f[0] == "/":
1603 continue
1597 continue
1604 self.ui.note(_("getting %s\n") % f)
1598 self.ui.note(_("getting %s\n") % f)
1605 t = self.file(f).read(get[f])
1599 t = self.file(f).read(get[f])
1606 self.wwrite(f, t)
1600 self.wwrite(f, t)
1607 util.set_exec(self.wjoin(f), mf2[f])
1601 util.set_exec(self.wjoin(f), mf2[f])
1608 if moddirstate:
1602 if moddirstate:
1609 if branch_merge:
1603 if branch_merge:
1610 self.dirstate.update([f], 'n', st_mtime=-1)
1604 self.dirstate.update([f], 'n', st_mtime=-1)
1611 else:
1605 else:
1612 self.dirstate.update([f], 'n')
1606 self.dirstate.update([f], 'n')
1613
1607
1614 # merge the tricky bits
1608 # merge the tricky bits
1615 files = merge.keys()
1609 files = merge.keys()
1616 files.sort()
1610 files.sort()
1617 for f in files:
1611 for f in files:
1618 self.ui.status(_("merging %s\n") % f)
1612 self.ui.status(_("merging %s\n") % f)
1619 my, other, flag = merge[f]
1613 my, other, flag = merge[f]
1620 ret = self.merge3(f, my, other)
1614 ret = self.merge3(f, my, other)
1621 if ret:
1615 if ret:
1622 err = True
1616 err = True
1623 util.set_exec(self.wjoin(f), flag)
1617 util.set_exec(self.wjoin(f), flag)
1624 if moddirstate:
1618 if moddirstate:
1625 if branch_merge:
1619 if branch_merge:
1626 # We've done a branch merge, mark this file as merged
1620 # We've done a branch merge, mark this file as merged
1627 # so that we properly record the merger later
1621 # so that we properly record the merger later
1628 self.dirstate.update([f], 'm')
1622 self.dirstate.update([f], 'm')
1629 else:
1623 else:
1630 # We've update-merged a locally modified file, so
1624 # We've update-merged a locally modified file, so
1631 # we set the dirstate to emulate a normal checkout
1625 # we set the dirstate to emulate a normal checkout
1632 # of that file some time in the past. Thus our
1626 # of that file some time in the past. Thus our
1633 # merge will appear as a normal local file
1627 # merge will appear as a normal local file
1634 # modification.
1628 # modification.
1635 f_len = len(self.file(f).read(other))
1629 f_len = len(self.file(f).read(other))
1636 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1630 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1637
1631
1638 remove.sort()
1632 remove.sort()
1639 for f in remove:
1633 for f in remove:
1640 self.ui.note(_("removing %s\n") % f)
1634 self.ui.note(_("removing %s\n") % f)
1641 try:
1635 try:
1642 util.unlink(self.wjoin(f))
1636 util.unlink(self.wjoin(f))
1643 except OSError, inst:
1637 except OSError, inst:
1644 if inst.errno != errno.ENOENT:
1638 if inst.errno != errno.ENOENT:
1645 self.ui.warn(_("update failed to remove %s: %s!\n") %
1639 self.ui.warn(_("update failed to remove %s: %s!\n") %
1646 (f, inst.strerror))
1640 (f, inst.strerror))
1647 if moddirstate:
1641 if moddirstate:
1648 if branch_merge:
1642 if branch_merge:
1649 self.dirstate.update(remove, 'r')
1643 self.dirstate.update(remove, 'r')
1650 else:
1644 else:
1651 self.dirstate.forget(remove)
1645 self.dirstate.forget(remove)
1652
1646
1653 if moddirstate:
1647 if moddirstate:
1654 self.dirstate.setparents(p1, p2)
1648 self.dirstate.setparents(p1, p2)
1655 return err
1649 return err
1656
1650
1657 def merge3(self, fn, my, other):
1651 def merge3(self, fn, my, other):
1658 """perform a 3-way merge in the working directory"""
1652 """perform a 3-way merge in the working directory"""
1659
1653
1660 def temp(prefix, node):
1654 def temp(prefix, node):
1661 pre = "%s~%s." % (os.path.basename(fn), prefix)
1655 pre = "%s~%s." % (os.path.basename(fn), prefix)
1662 (fd, name) = tempfile.mkstemp("", pre)
1656 (fd, name) = tempfile.mkstemp("", pre)
1663 f = os.fdopen(fd, "wb")
1657 f = os.fdopen(fd, "wb")
1664 self.wwrite(fn, fl.read(node), f)
1658 self.wwrite(fn, fl.read(node), f)
1665 f.close()
1659 f.close()
1666 return name
1660 return name
1667
1661
1668 fl = self.file(fn)
1662 fl = self.file(fn)
1669 base = fl.ancestor(my, other)
1663 base = fl.ancestor(my, other)
1670 a = self.wjoin(fn)
1664 a = self.wjoin(fn)
1671 b = temp("base", base)
1665 b = temp("base", base)
1672 c = temp("other", other)
1666 c = temp("other", other)
1673
1667
1674 self.ui.note(_("resolving %s\n") % fn)
1668 self.ui.note(_("resolving %s\n") % fn)
1675 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1669 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1676 (fn, short(my), short(other), short(base)))
1670 (fn, short(my), short(other), short(base)))
1677
1671
1678 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1672 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1679 or "hgmerge")
1673 or "hgmerge")
1680 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1674 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1681 if r:
1675 if r:
1682 self.ui.warn(_("merging %s failed!\n") % fn)
1676 self.ui.warn(_("merging %s failed!\n") % fn)
1683
1677
1684 os.unlink(b)
1678 os.unlink(b)
1685 os.unlink(c)
1679 os.unlink(c)
1686 return r
1680 return r
1687
1681
1688 def verify(self):
1682 def verify(self):
1689 filelinkrevs = {}
1683 filelinkrevs = {}
1690 filenodes = {}
1684 filenodes = {}
1691 changesets = revisions = files = 0
1685 changesets = revisions = files = 0
1692 errors = [0]
1686 errors = [0]
1693 neededmanifests = {}
1687 neededmanifests = {}
1694
1688
1695 def err(msg):
1689 def err(msg):
1696 self.ui.warn(msg + "\n")
1690 self.ui.warn(msg + "\n")
1697 errors[0] += 1
1691 errors[0] += 1
1698
1692
1699 def checksize(obj, name):
1693 def checksize(obj, name):
1700 d = obj.checksize()
1694 d = obj.checksize()
1701 if d[0]:
1695 if d[0]:
1702 err(_("%s data length off by %d bytes") % (name, d[0]))
1696 err(_("%s data length off by %d bytes") % (name, d[0]))
1703 if d[1]:
1697 if d[1]:
1704 err(_("%s index contains %d extra bytes") % (name, d[1]))
1698 err(_("%s index contains %d extra bytes") % (name, d[1]))
1705
1699
1706 seen = {}
1700 seen = {}
1707 self.ui.status(_("checking changesets\n"))
1701 self.ui.status(_("checking changesets\n"))
1708 checksize(self.changelog, "changelog")
1702 checksize(self.changelog, "changelog")
1709
1703
1710 for i in range(self.changelog.count()):
1704 for i in range(self.changelog.count()):
1711 changesets += 1
1705 changesets += 1
1712 n = self.changelog.node(i)
1706 n = self.changelog.node(i)
1713 l = self.changelog.linkrev(n)
1707 l = self.changelog.linkrev(n)
1714 if l != i:
1708 if l != i:
1715 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1709 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1716 if n in seen:
1710 if n in seen:
1717 err(_("duplicate changeset at revision %d") % i)
1711 err(_("duplicate changeset at revision %d") % i)
1718 seen[n] = 1
1712 seen[n] = 1
1719
1713
1720 for p in self.changelog.parents(n):
1714 for p in self.changelog.parents(n):
1721 if p not in self.changelog.nodemap:
1715 if p not in self.changelog.nodemap:
1722 err(_("changeset %s has unknown parent %s") %
1716 err(_("changeset %s has unknown parent %s") %
1723 (short(n), short(p)))
1717 (short(n), short(p)))
1724 try:
1718 try:
1725 changes = self.changelog.read(n)
1719 changes = self.changelog.read(n)
1726 except KeyboardInterrupt:
1720 except KeyboardInterrupt:
1727 self.ui.warn(_("interrupted"))
1721 self.ui.warn(_("interrupted"))
1728 raise
1722 raise
1729 except Exception, inst:
1723 except Exception, inst:
1730 err(_("unpacking changeset %s: %s") % (short(n), inst))
1724 err(_("unpacking changeset %s: %s") % (short(n), inst))
1731
1725
1732 neededmanifests[changes[0]] = n
1726 neededmanifests[changes[0]] = n
1733
1727
1734 for f in changes[3]:
1728 for f in changes[3]:
1735 filelinkrevs.setdefault(f, []).append(i)
1729 filelinkrevs.setdefault(f, []).append(i)
1736
1730
1737 seen = {}
1731 seen = {}
1738 self.ui.status(_("checking manifests\n"))
1732 self.ui.status(_("checking manifests\n"))
1739 checksize(self.manifest, "manifest")
1733 checksize(self.manifest, "manifest")
1740
1734
1741 for i in range(self.manifest.count()):
1735 for i in range(self.manifest.count()):
1742 n = self.manifest.node(i)
1736 n = self.manifest.node(i)
1743 l = self.manifest.linkrev(n)
1737 l = self.manifest.linkrev(n)
1744
1738
1745 if l < 0 or l >= self.changelog.count():
1739 if l < 0 or l >= self.changelog.count():
1746 err(_("bad manifest link (%d) at revision %d") % (l, i))
1740 err(_("bad manifest link (%d) at revision %d") % (l, i))
1747
1741
1748 if n in neededmanifests:
1742 if n in neededmanifests:
1749 del neededmanifests[n]
1743 del neededmanifests[n]
1750
1744
1751 if n in seen:
1745 if n in seen:
1752 err(_("duplicate manifest at revision %d") % i)
1746 err(_("duplicate manifest at revision %d") % i)
1753
1747
1754 seen[n] = 1
1748 seen[n] = 1
1755
1749
1756 for p in self.manifest.parents(n):
1750 for p in self.manifest.parents(n):
1757 if p not in self.manifest.nodemap:
1751 if p not in self.manifest.nodemap:
1758 err(_("manifest %s has unknown parent %s") %
1752 err(_("manifest %s has unknown parent %s") %
1759 (short(n), short(p)))
1753 (short(n), short(p)))
1760
1754
1761 try:
1755 try:
1762 delta = mdiff.patchtext(self.manifest.delta(n))
1756 delta = mdiff.patchtext(self.manifest.delta(n))
1763 except KeyboardInterrupt:
1757 except KeyboardInterrupt:
1764 self.ui.warn(_("interrupted"))
1758 self.ui.warn(_("interrupted"))
1765 raise
1759 raise
1766 except Exception, inst:
1760 except Exception, inst:
1767 err(_("unpacking manifest %s: %s") % (short(n), inst))
1761 err(_("unpacking manifest %s: %s") % (short(n), inst))
1768
1762
1769 ff = [ l.split('\0') for l in delta.splitlines() ]
1763 ff = [ l.split('\0') for l in delta.splitlines() ]
1770 for f, fn in ff:
1764 for f, fn in ff:
1771 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1765 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1772
1766
1773 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1767 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1774
1768
1775 for m, c in neededmanifests.items():
1769 for m, c in neededmanifests.items():
1776 err(_("Changeset %s refers to unknown manifest %s") %
1770 err(_("Changeset %s refers to unknown manifest %s") %
1777 (short(m), short(c)))
1771 (short(m), short(c)))
1778 del neededmanifests
1772 del neededmanifests
1779
1773
1780 for f in filenodes:
1774 for f in filenodes:
1781 if f not in filelinkrevs:
1775 if f not in filelinkrevs:
1782 err(_("file %s in manifest but not in changesets") % f)
1776 err(_("file %s in manifest but not in changesets") % f)
1783
1777
1784 for f in filelinkrevs:
1778 for f in filelinkrevs:
1785 if f not in filenodes:
1779 if f not in filenodes:
1786 err(_("file %s in changeset but not in manifest") % f)
1780 err(_("file %s in changeset but not in manifest") % f)
1787
1781
1788 self.ui.status(_("checking files\n"))
1782 self.ui.status(_("checking files\n"))
1789 ff = filenodes.keys()
1783 ff = filenodes.keys()
1790 ff.sort()
1784 ff.sort()
1791 for f in ff:
1785 for f in ff:
1792 if f == "/dev/null":
1786 if f == "/dev/null":
1793 continue
1787 continue
1794 files += 1
1788 files += 1
1795 fl = self.file(f)
1789 fl = self.file(f)
1796 checksize(fl, f)
1790 checksize(fl, f)
1797
1791
1798 nodes = {nullid: 1}
1792 nodes = {nullid: 1}
1799 seen = {}
1793 seen = {}
1800 for i in range(fl.count()):
1794 for i in range(fl.count()):
1801 revisions += 1
1795 revisions += 1
1802 n = fl.node(i)
1796 n = fl.node(i)
1803
1797
1804 if n in seen:
1798 if n in seen:
1805 err(_("%s: duplicate revision %d") % (f, i))
1799 err(_("%s: duplicate revision %d") % (f, i))
1806 if n not in filenodes[f]:
1800 if n not in filenodes[f]:
1807 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1801 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1808 else:
1802 else:
1809 del filenodes[f][n]
1803 del filenodes[f][n]
1810
1804
1811 flr = fl.linkrev(n)
1805 flr = fl.linkrev(n)
1812 if flr not in filelinkrevs[f]:
1806 if flr not in filelinkrevs[f]:
1813 err(_("%s:%s points to unexpected changeset %d")
1807 err(_("%s:%s points to unexpected changeset %d")
1814 % (f, short(n), flr))
1808 % (f, short(n), flr))
1815 else:
1809 else:
1816 filelinkrevs[f].remove(flr)
1810 filelinkrevs[f].remove(flr)
1817
1811
1818 # verify contents
1812 # verify contents
1819 try:
1813 try:
1820 t = fl.read(n)
1814 t = fl.read(n)
1821 except KeyboardInterrupt:
1815 except KeyboardInterrupt:
1822 self.ui.warn(_("interrupted"))
1816 self.ui.warn(_("interrupted"))
1823 raise
1817 raise
1824 except Exception, inst:
1818 except Exception, inst:
1825 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1819 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1826
1820
1827 # verify parents
1821 # verify parents
1828 (p1, p2) = fl.parents(n)
1822 (p1, p2) = fl.parents(n)
1829 if p1 not in nodes:
1823 if p1 not in nodes:
1830 err(_("file %s:%s unknown parent 1 %s") %
1824 err(_("file %s:%s unknown parent 1 %s") %
1831 (f, short(n), short(p1)))
1825 (f, short(n), short(p1)))
1832 if p2 not in nodes:
1826 if p2 not in nodes:
1833 err(_("file %s:%s unknown parent 2 %s") %
1827 err(_("file %s:%s unknown parent 2 %s") %
1834 (f, short(n), short(p1)))
1828 (f, short(n), short(p1)))
1835 nodes[n] = 1
1829 nodes[n] = 1
1836
1830
1837 # cross-check
1831 # cross-check
1838 for node in filenodes[f]:
1832 for node in filenodes[f]:
1839 err(_("node %s in manifests not in %s") % (hex(node), f))
1833 err(_("node %s in manifests not in %s") % (hex(node), f))
1840
1834
1841 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1835 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1842 (files, changesets, revisions))
1836 (files, changesets, revisions))
1843
1837
1844 if errors[0]:
1838 if errors[0]:
1845 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1839 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1846 return 1
1840 return 1
General Comments 0
You need to be logged in to leave comments. Login now