##// END OF EJS Templates
Better error message (without /.hg appended) when repository is not found....
Thomas Arendsen Hein -
r1588:a679a364 default
parent child Browse files
Show More
@@ -1,1798 +1,1798 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14
14
15 class localrepository(object):
15 class localrepository(object):
16 def __init__(self, ui, path=None, create=0):
16 def __init__(self, ui, path=None, create=0):
17 if not path:
17 if not path:
18 p = os.getcwd()
18 p = os.getcwd()
19 while not os.path.isdir(os.path.join(p, ".hg")):
19 while not os.path.isdir(os.path.join(p, ".hg")):
20 oldp = p
20 oldp = p
21 p = os.path.dirname(p)
21 p = os.path.dirname(p)
22 if p == oldp: raise repo.RepoError(_("no repo found"))
22 if p == oldp: raise repo.RepoError(_("no repo found"))
23 path = p
23 path = p
24 self.path = os.path.join(path, ".hg")
24 self.path = os.path.join(path, ".hg")
25
25
26 if not create and not os.path.isdir(self.path):
26 if not create and not os.path.isdir(self.path):
27 raise repo.RepoError(_("repository %s not found") % self.path)
27 raise repo.RepoError(_("repository %s not found") % path)
28
28
29 self.root = os.path.abspath(path)
29 self.root = os.path.abspath(path)
30 self.ui = ui
30 self.ui = ui
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.manifest = manifest.manifest(self.opener)
33 self.manifest = manifest.manifest(self.opener)
34 self.changelog = changelog.changelog(self.opener)
34 self.changelog = changelog.changelog(self.opener)
35 self.tagscache = None
35 self.tagscache = None
36 self.nodetagscache = None
36 self.nodetagscache = None
37 self.encodepats = None
37 self.encodepats = None
38 self.decodepats = None
38 self.decodepats = None
39
39
40 if create:
40 if create:
41 os.mkdir(self.path)
41 os.mkdir(self.path)
42 os.mkdir(self.join("data"))
42 os.mkdir(self.join("data"))
43
43
44 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
44 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
45 try:
45 try:
46 self.ui.readconfig(self.join("hgrc"))
46 self.ui.readconfig(self.join("hgrc"))
47 except IOError: pass
47 except IOError: pass
48
48
49 def hook(self, name, **args):
49 def hook(self, name, **args):
50 def runhook(name, cmd):
50 def runhook(name, cmd):
51 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
51 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
52 old = {}
52 old = {}
53 for k, v in args.items():
53 for k, v in args.items():
54 k = k.upper()
54 k = k.upper()
55 old[k] = os.environ.get(k, None)
55 old[k] = os.environ.get(k, None)
56 os.environ[k] = v
56 os.environ[k] = v
57
57
58 # Hooks run in the repository root
58 # Hooks run in the repository root
59 olddir = os.getcwd()
59 olddir = os.getcwd()
60 os.chdir(self.root)
60 os.chdir(self.root)
61 r = os.system(cmd)
61 r = os.system(cmd)
62 os.chdir(olddir)
62 os.chdir(olddir)
63
63
64 for k, v in old.items():
64 for k, v in old.items():
65 if v != None:
65 if v != None:
66 os.environ[k] = v
66 os.environ[k] = v
67 else:
67 else:
68 del os.environ[k]
68 del os.environ[k]
69
69
70 if r:
70 if r:
71 self.ui.warn(_("abort: %s hook failed with status %d!\n") %
71 self.ui.warn(_("abort: %s hook failed with status %d!\n") %
72 (name, r))
72 (name, r))
73 return False
73 return False
74 return True
74 return True
75
75
76 r = True
76 r = True
77 for hname, cmd in self.ui.configitems("hooks"):
77 for hname, cmd in self.ui.configitems("hooks"):
78 s = hname.split(".")
78 s = hname.split(".")
79 if s[0] == name and cmd:
79 if s[0] == name and cmd:
80 r = runhook(hname, cmd) and r
80 r = runhook(hname, cmd) and r
81 return r
81 return r
82
82
83 def tags(self):
83 def tags(self):
84 '''return a mapping of tag to node'''
84 '''return a mapping of tag to node'''
85 if not self.tagscache:
85 if not self.tagscache:
86 self.tagscache = {}
86 self.tagscache = {}
87 def addtag(self, k, n):
87 def addtag(self, k, n):
88 try:
88 try:
89 bin_n = bin(n)
89 bin_n = bin(n)
90 except TypeError:
90 except TypeError:
91 bin_n = ''
91 bin_n = ''
92 self.tagscache[k.strip()] = bin_n
92 self.tagscache[k.strip()] = bin_n
93
93
94 try:
94 try:
95 # read each head of the tags file, ending with the tip
95 # read each head of the tags file, ending with the tip
96 # and add each tag found to the map, with "newer" ones
96 # and add each tag found to the map, with "newer" ones
97 # taking precedence
97 # taking precedence
98 fl = self.file(".hgtags")
98 fl = self.file(".hgtags")
99 h = fl.heads()
99 h = fl.heads()
100 h.reverse()
100 h.reverse()
101 for r in h:
101 for r in h:
102 for l in fl.read(r).splitlines():
102 for l in fl.read(r).splitlines():
103 if l:
103 if l:
104 n, k = l.split(" ", 1)
104 n, k = l.split(" ", 1)
105 addtag(self, k, n)
105 addtag(self, k, n)
106 except KeyError:
106 except KeyError:
107 pass
107 pass
108
108
109 try:
109 try:
110 f = self.opener("localtags")
110 f = self.opener("localtags")
111 for l in f:
111 for l in f:
112 n, k = l.split(" ", 1)
112 n, k = l.split(" ", 1)
113 addtag(self, k, n)
113 addtag(self, k, n)
114 except IOError:
114 except IOError:
115 pass
115 pass
116
116
117 self.tagscache['tip'] = self.changelog.tip()
117 self.tagscache['tip'] = self.changelog.tip()
118
118
119 return self.tagscache
119 return self.tagscache
120
120
121 def tagslist(self):
121 def tagslist(self):
122 '''return a list of tags ordered by revision'''
122 '''return a list of tags ordered by revision'''
123 l = []
123 l = []
124 for t, n in self.tags().items():
124 for t, n in self.tags().items():
125 try:
125 try:
126 r = self.changelog.rev(n)
126 r = self.changelog.rev(n)
127 except:
127 except:
128 r = -2 # sort to the beginning of the list if unknown
128 r = -2 # sort to the beginning of the list if unknown
129 l.append((r,t,n))
129 l.append((r,t,n))
130 l.sort()
130 l.sort()
131 return [(t,n) for r,t,n in l]
131 return [(t,n) for r,t,n in l]
132
132
133 def nodetags(self, node):
133 def nodetags(self, node):
134 '''return the tags associated with a node'''
134 '''return the tags associated with a node'''
135 if not self.nodetagscache:
135 if not self.nodetagscache:
136 self.nodetagscache = {}
136 self.nodetagscache = {}
137 for t,n in self.tags().items():
137 for t,n in self.tags().items():
138 self.nodetagscache.setdefault(n,[]).append(t)
138 self.nodetagscache.setdefault(n,[]).append(t)
139 return self.nodetagscache.get(node, [])
139 return self.nodetagscache.get(node, [])
140
140
141 def lookup(self, key):
141 def lookup(self, key):
142 try:
142 try:
143 return self.tags()[key]
143 return self.tags()[key]
144 except KeyError:
144 except KeyError:
145 try:
145 try:
146 return self.changelog.lookup(key)
146 return self.changelog.lookup(key)
147 except:
147 except:
148 raise repo.RepoError(_("unknown revision '%s'") % key)
148 raise repo.RepoError(_("unknown revision '%s'") % key)
149
149
150 def dev(self):
150 def dev(self):
151 return os.stat(self.path).st_dev
151 return os.stat(self.path).st_dev
152
152
153 def local(self):
153 def local(self):
154 return True
154 return True
155
155
156 def join(self, f):
156 def join(self, f):
157 return os.path.join(self.path, f)
157 return os.path.join(self.path, f)
158
158
159 def wjoin(self, f):
159 def wjoin(self, f):
160 return os.path.join(self.root, f)
160 return os.path.join(self.root, f)
161
161
162 def file(self, f):
162 def file(self, f):
163 if f[0] == '/': f = f[1:]
163 if f[0] == '/': f = f[1:]
164 return filelog.filelog(self.opener, f)
164 return filelog.filelog(self.opener, f)
165
165
166 def getcwd(self):
166 def getcwd(self):
167 return self.dirstate.getcwd()
167 return self.dirstate.getcwd()
168
168
169 def wfile(self, f, mode='r'):
169 def wfile(self, f, mode='r'):
170 return self.wopener(f, mode)
170 return self.wopener(f, mode)
171
171
172 def wread(self, filename):
172 def wread(self, filename):
173 if self.encodepats == None:
173 if self.encodepats == None:
174 l = []
174 l = []
175 for pat, cmd in self.ui.configitems("encode"):
175 for pat, cmd in self.ui.configitems("encode"):
176 mf = util.matcher("", "/", [pat], [], [])[1]
176 mf = util.matcher("", "/", [pat], [], [])[1]
177 l.append((mf, cmd))
177 l.append((mf, cmd))
178 self.encodepats = l
178 self.encodepats = l
179
179
180 data = self.wopener(filename, 'r').read()
180 data = self.wopener(filename, 'r').read()
181
181
182 for mf, cmd in self.encodepats:
182 for mf, cmd in self.encodepats:
183 if mf(filename):
183 if mf(filename):
184 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
184 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
185 data = util.filter(data, cmd)
185 data = util.filter(data, cmd)
186 break
186 break
187
187
188 return data
188 return data
189
189
190 def wwrite(self, filename, data, fd=None):
190 def wwrite(self, filename, data, fd=None):
191 if self.decodepats == None:
191 if self.decodepats == None:
192 l = []
192 l = []
193 for pat, cmd in self.ui.configitems("decode"):
193 for pat, cmd in self.ui.configitems("decode"):
194 mf = util.matcher("", "/", [pat], [], [])[1]
194 mf = util.matcher("", "/", [pat], [], [])[1]
195 l.append((mf, cmd))
195 l.append((mf, cmd))
196 self.decodepats = l
196 self.decodepats = l
197
197
198 for mf, cmd in self.decodepats:
198 for mf, cmd in self.decodepats:
199 if mf(filename):
199 if mf(filename):
200 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
200 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
201 data = util.filter(data, cmd)
201 data = util.filter(data, cmd)
202 break
202 break
203
203
204 if fd:
204 if fd:
205 return fd.write(data)
205 return fd.write(data)
206 return self.wopener(filename, 'w').write(data)
206 return self.wopener(filename, 'w').write(data)
207
207
208 def transaction(self):
208 def transaction(self):
209 # save dirstate for undo
209 # save dirstate for undo
210 try:
210 try:
211 ds = self.opener("dirstate").read()
211 ds = self.opener("dirstate").read()
212 except IOError:
212 except IOError:
213 ds = ""
213 ds = ""
214 self.opener("journal.dirstate", "w").write(ds)
214 self.opener("journal.dirstate", "w").write(ds)
215
215
216 def after():
216 def after():
217 util.rename(self.join("journal"), self.join("undo"))
217 util.rename(self.join("journal"), self.join("undo"))
218 util.rename(self.join("journal.dirstate"),
218 util.rename(self.join("journal.dirstate"),
219 self.join("undo.dirstate"))
219 self.join("undo.dirstate"))
220
220
221 return transaction.transaction(self.ui.warn, self.opener,
221 return transaction.transaction(self.ui.warn, self.opener,
222 self.join("journal"), after)
222 self.join("journal"), after)
223
223
224 def recover(self):
224 def recover(self):
225 lock = self.lock()
225 lock = self.lock()
226 if os.path.exists(self.join("journal")):
226 if os.path.exists(self.join("journal")):
227 self.ui.status(_("rolling back interrupted transaction\n"))
227 self.ui.status(_("rolling back interrupted transaction\n"))
228 transaction.rollback(self.opener, self.join("journal"))
228 transaction.rollback(self.opener, self.join("journal"))
229 return True
229 return True
230 else:
230 else:
231 self.ui.warn(_("no interrupted transaction available\n"))
231 self.ui.warn(_("no interrupted transaction available\n"))
232 return False
232 return False
233
233
234 def undo(self):
234 def undo(self):
235 wlock = self.wlock()
235 wlock = self.wlock()
236 lock = self.lock()
236 lock = self.lock()
237 if os.path.exists(self.join("undo")):
237 if os.path.exists(self.join("undo")):
238 self.ui.status(_("rolling back last transaction\n"))
238 self.ui.status(_("rolling back last transaction\n"))
239 transaction.rollback(self.opener, self.join("undo"))
239 transaction.rollback(self.opener, self.join("undo"))
240 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
240 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
241 self.dirstate.read()
241 self.dirstate.read()
242 else:
242 else:
243 self.ui.warn(_("no undo information available\n"))
243 self.ui.warn(_("no undo information available\n"))
244
244
245 def lock(self, wait=1):
245 def lock(self, wait=1):
246 try:
246 try:
247 return lock.lock(self.join("lock"), 0)
247 return lock.lock(self.join("lock"), 0)
248 except lock.LockHeld, inst:
248 except lock.LockHeld, inst:
249 if wait:
249 if wait:
250 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
250 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
251 return lock.lock(self.join("lock"), wait)
251 return lock.lock(self.join("lock"), wait)
252 raise inst
252 raise inst
253
253
254 def wlock(self, wait=1):
254 def wlock(self, wait=1):
255 try:
255 try:
256 wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
256 wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
257 except lock.LockHeld, inst:
257 except lock.LockHeld, inst:
258 if not wait:
258 if not wait:
259 raise inst
259 raise inst
260 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
260 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
261 wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
261 wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
262 self.dirstate.read()
262 self.dirstate.read()
263 return wlock
263 return wlock
264
264
265 def rawcommit(self, files, text, user, date, p1=None, p2=None):
265 def rawcommit(self, files, text, user, date, p1=None, p2=None):
266 orig_parent = self.dirstate.parents()[0] or nullid
266 orig_parent = self.dirstate.parents()[0] or nullid
267 p1 = p1 or self.dirstate.parents()[0] or nullid
267 p1 = p1 or self.dirstate.parents()[0] or nullid
268 p2 = p2 or self.dirstate.parents()[1] or nullid
268 p2 = p2 or self.dirstate.parents()[1] or nullid
269 c1 = self.changelog.read(p1)
269 c1 = self.changelog.read(p1)
270 c2 = self.changelog.read(p2)
270 c2 = self.changelog.read(p2)
271 m1 = self.manifest.read(c1[0])
271 m1 = self.manifest.read(c1[0])
272 mf1 = self.manifest.readflags(c1[0])
272 mf1 = self.manifest.readflags(c1[0])
273 m2 = self.manifest.read(c2[0])
273 m2 = self.manifest.read(c2[0])
274 changed = []
274 changed = []
275
275
276 if orig_parent == p1:
276 if orig_parent == p1:
277 update_dirstate = 1
277 update_dirstate = 1
278 else:
278 else:
279 update_dirstate = 0
279 update_dirstate = 0
280
280
281 wlock = self.wlock()
281 wlock = self.wlock()
282 lock = self.lock()
282 lock = self.lock()
283 tr = self.transaction()
283 tr = self.transaction()
284 mm = m1.copy()
284 mm = m1.copy()
285 mfm = mf1.copy()
285 mfm = mf1.copy()
286 linkrev = self.changelog.count()
286 linkrev = self.changelog.count()
287 for f in files:
287 for f in files:
288 try:
288 try:
289 t = self.wread(f)
289 t = self.wread(f)
290 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
290 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
291 r = self.file(f)
291 r = self.file(f)
292 mfm[f] = tm
292 mfm[f] = tm
293
293
294 fp1 = m1.get(f, nullid)
294 fp1 = m1.get(f, nullid)
295 fp2 = m2.get(f, nullid)
295 fp2 = m2.get(f, nullid)
296
296
297 # is the same revision on two branches of a merge?
297 # is the same revision on two branches of a merge?
298 if fp2 == fp1:
298 if fp2 == fp1:
299 fp2 = nullid
299 fp2 = nullid
300
300
301 if fp2 != nullid:
301 if fp2 != nullid:
302 # is one parent an ancestor of the other?
302 # is one parent an ancestor of the other?
303 fpa = r.ancestor(fp1, fp2)
303 fpa = r.ancestor(fp1, fp2)
304 if fpa == fp1:
304 if fpa == fp1:
305 fp1, fp2 = fp2, nullid
305 fp1, fp2 = fp2, nullid
306 elif fpa == fp2:
306 elif fpa == fp2:
307 fp2 = nullid
307 fp2 = nullid
308
308
309 # is the file unmodified from the parent?
309 # is the file unmodified from the parent?
310 if t == r.read(fp1):
310 if t == r.read(fp1):
311 # record the proper existing parent in manifest
311 # record the proper existing parent in manifest
312 # no need to add a revision
312 # no need to add a revision
313 mm[f] = fp1
313 mm[f] = fp1
314 continue
314 continue
315
315
316 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
316 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
317 changed.append(f)
317 changed.append(f)
318 if update_dirstate:
318 if update_dirstate:
319 self.dirstate.update([f], "n")
319 self.dirstate.update([f], "n")
320 except IOError:
320 except IOError:
321 try:
321 try:
322 del mm[f]
322 del mm[f]
323 del mfm[f]
323 del mfm[f]
324 if update_dirstate:
324 if update_dirstate:
325 self.dirstate.forget([f])
325 self.dirstate.forget([f])
326 except:
326 except:
327 # deleted from p2?
327 # deleted from p2?
328 pass
328 pass
329
329
330 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
330 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
331 user = user or self.ui.username()
331 user = user or self.ui.username()
332 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
332 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
333 tr.close()
333 tr.close()
334 if update_dirstate:
334 if update_dirstate:
335 self.dirstate.setparents(n, nullid)
335 self.dirstate.setparents(n, nullid)
336
336
337 def commit(self, files = None, text = "", user = None, date = None,
337 def commit(self, files = None, text = "", user = None, date = None,
338 match = util.always, force=False):
338 match = util.always, force=False):
339 commit = []
339 commit = []
340 remove = []
340 remove = []
341 changed = []
341 changed = []
342
342
343 if files:
343 if files:
344 for f in files:
344 for f in files:
345 s = self.dirstate.state(f)
345 s = self.dirstate.state(f)
346 if s in 'nmai':
346 if s in 'nmai':
347 commit.append(f)
347 commit.append(f)
348 elif s == 'r':
348 elif s == 'r':
349 remove.append(f)
349 remove.append(f)
350 else:
350 else:
351 self.ui.warn(_("%s not tracked!\n") % f)
351 self.ui.warn(_("%s not tracked!\n") % f)
352 else:
352 else:
353 (c, a, d, u) = self.changes(match=match)
353 (c, a, d, u) = self.changes(match=match)
354 commit = c + a
354 commit = c + a
355 remove = d
355 remove = d
356
356
357 p1, p2 = self.dirstate.parents()
357 p1, p2 = self.dirstate.parents()
358 c1 = self.changelog.read(p1)
358 c1 = self.changelog.read(p1)
359 c2 = self.changelog.read(p2)
359 c2 = self.changelog.read(p2)
360 m1 = self.manifest.read(c1[0])
360 m1 = self.manifest.read(c1[0])
361 mf1 = self.manifest.readflags(c1[0])
361 mf1 = self.manifest.readflags(c1[0])
362 m2 = self.manifest.read(c2[0])
362 m2 = self.manifest.read(c2[0])
363
363
364 if not commit and not remove and not force and p2 == nullid:
364 if not commit and not remove and not force and p2 == nullid:
365 self.ui.status(_("nothing changed\n"))
365 self.ui.status(_("nothing changed\n"))
366 return None
366 return None
367
367
368 if not self.hook("precommit"):
368 if not self.hook("precommit"):
369 return None
369 return None
370
370
371 wlock = self.wlock()
371 wlock = self.wlock()
372 lock = self.lock()
372 lock = self.lock()
373 tr = self.transaction()
373 tr = self.transaction()
374
374
375 # check in files
375 # check in files
376 new = {}
376 new = {}
377 linkrev = self.changelog.count()
377 linkrev = self.changelog.count()
378 commit.sort()
378 commit.sort()
379 for f in commit:
379 for f in commit:
380 self.ui.note(f + "\n")
380 self.ui.note(f + "\n")
381 try:
381 try:
382 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
382 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
383 t = self.wread(f)
383 t = self.wread(f)
384 except IOError:
384 except IOError:
385 self.ui.warn(_("trouble committing %s!\n") % f)
385 self.ui.warn(_("trouble committing %s!\n") % f)
386 raise
386 raise
387
387
388 r = self.file(f)
388 r = self.file(f)
389
389
390 meta = {}
390 meta = {}
391 cp = self.dirstate.copied(f)
391 cp = self.dirstate.copied(f)
392 if cp:
392 if cp:
393 meta["copy"] = cp
393 meta["copy"] = cp
394 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
394 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
395 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
395 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
396 fp1, fp2 = nullid, nullid
396 fp1, fp2 = nullid, nullid
397 else:
397 else:
398 fp1 = m1.get(f, nullid)
398 fp1 = m1.get(f, nullid)
399 fp2 = m2.get(f, nullid)
399 fp2 = m2.get(f, nullid)
400
400
401 # is the same revision on two branches of a merge?
401 # is the same revision on two branches of a merge?
402 if fp2 == fp1:
402 if fp2 == fp1:
403 fp2 = nullid
403 fp2 = nullid
404
404
405 if fp2 != nullid:
405 if fp2 != nullid:
406 # is one parent an ancestor of the other?
406 # is one parent an ancestor of the other?
407 fpa = r.ancestor(fp1, fp2)
407 fpa = r.ancestor(fp1, fp2)
408 if fpa == fp1:
408 if fpa == fp1:
409 fp1, fp2 = fp2, nullid
409 fp1, fp2 = fp2, nullid
410 elif fpa == fp2:
410 elif fpa == fp2:
411 fp2 = nullid
411 fp2 = nullid
412
412
413 # is the file unmodified from the parent?
413 # is the file unmodified from the parent?
414 if not meta and t == r.read(fp1):
414 if not meta and t == r.read(fp1):
415 # record the proper existing parent in manifest
415 # record the proper existing parent in manifest
416 # no need to add a revision
416 # no need to add a revision
417 new[f] = fp1
417 new[f] = fp1
418 continue
418 continue
419
419
420 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
420 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
421 # remember what we've added so that we can later calculate
421 # remember what we've added so that we can later calculate
422 # the files to pull from a set of changesets
422 # the files to pull from a set of changesets
423 changed.append(f)
423 changed.append(f)
424
424
425 # update manifest
425 # update manifest
426 m1.update(new)
426 m1.update(new)
427 for f in remove:
427 for f in remove:
428 if f in m1:
428 if f in m1:
429 del m1[f]
429 del m1[f]
430 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
430 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
431 (new, remove))
431 (new, remove))
432
432
433 # add changeset
433 # add changeset
434 new = new.keys()
434 new = new.keys()
435 new.sort()
435 new.sort()
436
436
437 if not text:
437 if not text:
438 edittext = ""
438 edittext = ""
439 if p2 != nullid:
439 if p2 != nullid:
440 edittext += "HG: branch merge\n"
440 edittext += "HG: branch merge\n"
441 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
441 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
442 edittext += "".join(["HG: changed %s\n" % f for f in changed])
442 edittext += "".join(["HG: changed %s\n" % f for f in changed])
443 edittext += "".join(["HG: removed %s\n" % f for f in remove])
443 edittext += "".join(["HG: removed %s\n" % f for f in remove])
444 if not changed and not remove:
444 if not changed and not remove:
445 edittext += "HG: no files changed\n"
445 edittext += "HG: no files changed\n"
446 edittext = self.ui.edit(edittext)
446 edittext = self.ui.edit(edittext)
447 if not edittext.rstrip():
447 if not edittext.rstrip():
448 return None
448 return None
449 text = edittext
449 text = edittext
450
450
451 user = user or self.ui.username()
451 user = user or self.ui.username()
452 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
452 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
453 tr.close()
453 tr.close()
454
454
455 self.dirstate.setparents(n)
455 self.dirstate.setparents(n)
456 self.dirstate.update(new, "n")
456 self.dirstate.update(new, "n")
457 self.dirstate.forget(remove)
457 self.dirstate.forget(remove)
458
458
459 if not self.hook("commit", node=hex(n)):
459 if not self.hook("commit", node=hex(n)):
460 return None
460 return None
461 return n
461 return n
462
462
463 def walk(self, node=None, files=[], match=util.always):
463 def walk(self, node=None, files=[], match=util.always):
464 if node:
464 if node:
465 fdict = dict.fromkeys(files)
465 fdict = dict.fromkeys(files)
466 for fn in self.manifest.read(self.changelog.read(node)[0]):
466 for fn in self.manifest.read(self.changelog.read(node)[0]):
467 fdict.pop(fn, None)
467 fdict.pop(fn, None)
468 if match(fn):
468 if match(fn):
469 yield 'm', fn
469 yield 'm', fn
470 for fn in fdict:
470 for fn in fdict:
471 self.ui.warn(_('%s: No such file in rev %s\n') % (
471 self.ui.warn(_('%s: No such file in rev %s\n') % (
472 util.pathto(self.getcwd(), fn), short(node)))
472 util.pathto(self.getcwd(), fn), short(node)))
473 else:
473 else:
474 for src, fn in self.dirstate.walk(files, match):
474 for src, fn in self.dirstate.walk(files, match):
475 yield src, fn
475 yield src, fn
476
476
477 def changes(self, node1 = None, node2 = None, files = [],
477 def changes(self, node1 = None, node2 = None, files = [],
478 match = util.always):
478 match = util.always):
479 mf2, u = None, []
479 mf2, u = None, []
480
480
481 def fcmp(fn, mf):
481 def fcmp(fn, mf):
482 t1 = self.wread(fn)
482 t1 = self.wread(fn)
483 t2 = self.file(fn).read(mf.get(fn, nullid))
483 t2 = self.file(fn).read(mf.get(fn, nullid))
484 return cmp(t1, t2)
484 return cmp(t1, t2)
485
485
486 def mfmatches(node):
486 def mfmatches(node):
487 mf = dict(self.manifest.read(node))
487 mf = dict(self.manifest.read(node))
488 for fn in mf.keys():
488 for fn in mf.keys():
489 if not match(fn):
489 if not match(fn):
490 del mf[fn]
490 del mf[fn]
491 return mf
491 return mf
492
492
493 # are we comparing the working directory?
493 # are we comparing the working directory?
494 if not node2:
494 if not node2:
495 try:
495 try:
496 wlock = self.wlock(wait=0)
496 wlock = self.wlock(wait=0)
497 except lock.LockHeld:
497 except lock.LockHeld:
498 wlock = None
498 wlock = None
499 l, c, a, d, u = self.dirstate.changes(files, match)
499 l, c, a, d, u = self.dirstate.changes(files, match)
500
500
501 # are we comparing working dir against its parent?
501 # are we comparing working dir against its parent?
502 if not node1:
502 if not node1:
503 if l:
503 if l:
504 # do a full compare of any files that might have changed
504 # do a full compare of any files that might have changed
505 change = self.changelog.read(self.dirstate.parents()[0])
505 change = self.changelog.read(self.dirstate.parents()[0])
506 mf2 = mfmatches(change[0])
506 mf2 = mfmatches(change[0])
507 for f in l:
507 for f in l:
508 if fcmp(f, mf2):
508 if fcmp(f, mf2):
509 c.append(f)
509 c.append(f)
510 elif wlock is not None:
510 elif wlock is not None:
511 self.dirstate.update([f], "n")
511 self.dirstate.update([f], "n")
512
512
513 for l in c, a, d, u:
513 for l in c, a, d, u:
514 l.sort()
514 l.sort()
515
515
516 return (c, a, d, u)
516 return (c, a, d, u)
517
517
518 # are we comparing working dir against non-tip?
518 # are we comparing working dir against non-tip?
519 # generate a pseudo-manifest for the working dir
519 # generate a pseudo-manifest for the working dir
520 if not node2:
520 if not node2:
521 if not mf2:
521 if not mf2:
522 change = self.changelog.read(self.dirstate.parents()[0])
522 change = self.changelog.read(self.dirstate.parents()[0])
523 mf2 = mfmatches(change[0])
523 mf2 = mfmatches(change[0])
524 for f in a + c + l:
524 for f in a + c + l:
525 mf2[f] = ""
525 mf2[f] = ""
526 for f in d:
526 for f in d:
527 if f in mf2: del mf2[f]
527 if f in mf2: del mf2[f]
528 else:
528 else:
529 change = self.changelog.read(node2)
529 change = self.changelog.read(node2)
530 mf2 = mfmatches(change[0])
530 mf2 = mfmatches(change[0])
531
531
532 # flush lists from dirstate before comparing manifests
532 # flush lists from dirstate before comparing manifests
533 c, a = [], []
533 c, a = [], []
534
534
535 change = self.changelog.read(node1)
535 change = self.changelog.read(node1)
536 mf1 = mfmatches(change[0])
536 mf1 = mfmatches(change[0])
537
537
538 for fn in mf2:
538 for fn in mf2:
539 if mf1.has_key(fn):
539 if mf1.has_key(fn):
540 if mf1[fn] != mf2[fn]:
540 if mf1[fn] != mf2[fn]:
541 if mf2[fn] != "" or fcmp(fn, mf1):
541 if mf2[fn] != "" or fcmp(fn, mf1):
542 c.append(fn)
542 c.append(fn)
543 del mf1[fn]
543 del mf1[fn]
544 else:
544 else:
545 a.append(fn)
545 a.append(fn)
546
546
547 d = mf1.keys()
547 d = mf1.keys()
548
548
549 for l in c, a, d, u:
549 for l in c, a, d, u:
550 l.sort()
550 l.sort()
551
551
552 return (c, a, d, u)
552 return (c, a, d, u)
553
553
554 def add(self, list):
554 def add(self, list):
555 wlock = self.wlock()
555 wlock = self.wlock()
556 for f in list:
556 for f in list:
557 p = self.wjoin(f)
557 p = self.wjoin(f)
558 if not os.path.exists(p):
558 if not os.path.exists(p):
559 self.ui.warn(_("%s does not exist!\n") % f)
559 self.ui.warn(_("%s does not exist!\n") % f)
560 elif not os.path.isfile(p):
560 elif not os.path.isfile(p):
561 self.ui.warn(_("%s not added: only files supported currently\n") % f)
561 self.ui.warn(_("%s not added: only files supported currently\n") % f)
562 elif self.dirstate.state(f) in 'an':
562 elif self.dirstate.state(f) in 'an':
563 self.ui.warn(_("%s already tracked!\n") % f)
563 self.ui.warn(_("%s already tracked!\n") % f)
564 else:
564 else:
565 self.dirstate.update([f], "a")
565 self.dirstate.update([f], "a")
566
566
567 def forget(self, list):
567 def forget(self, list):
568 wlock = self.wlock()
568 wlock = self.wlock()
569 for f in list:
569 for f in list:
570 if self.dirstate.state(f) not in 'ai':
570 if self.dirstate.state(f) not in 'ai':
571 self.ui.warn(_("%s not added!\n") % f)
571 self.ui.warn(_("%s not added!\n") % f)
572 else:
572 else:
573 self.dirstate.forget([f])
573 self.dirstate.forget([f])
574
574
575 def remove(self, list, unlink=False):
575 def remove(self, list, unlink=False):
576 if unlink:
576 if unlink:
577 for f in list:
577 for f in list:
578 try:
578 try:
579 util.unlink(self.wjoin(f))
579 util.unlink(self.wjoin(f))
580 except OSError, inst:
580 except OSError, inst:
581 if inst.errno != errno.ENOENT: raise
581 if inst.errno != errno.ENOENT: raise
582 wlock = self.wlock()
582 wlock = self.wlock()
583 for f in list:
583 for f in list:
584 p = self.wjoin(f)
584 p = self.wjoin(f)
585 if os.path.exists(p):
585 if os.path.exists(p):
586 self.ui.warn(_("%s still exists!\n") % f)
586 self.ui.warn(_("%s still exists!\n") % f)
587 elif self.dirstate.state(f) == 'a':
587 elif self.dirstate.state(f) == 'a':
588 self.ui.warn(_("%s never committed!\n") % f)
588 self.ui.warn(_("%s never committed!\n") % f)
589 self.dirstate.forget([f])
589 self.dirstate.forget([f])
590 elif f not in self.dirstate:
590 elif f not in self.dirstate:
591 self.ui.warn(_("%s not tracked!\n") % f)
591 self.ui.warn(_("%s not tracked!\n") % f)
592 else:
592 else:
593 self.dirstate.update([f], "r")
593 self.dirstate.update([f], "r")
594
594
595 def undelete(self, list):
595 def undelete(self, list):
596 p = self.dirstate.parents()[0]
596 p = self.dirstate.parents()[0]
597 mn = self.changelog.read(p)[0]
597 mn = self.changelog.read(p)[0]
598 mf = self.manifest.readflags(mn)
598 mf = self.manifest.readflags(mn)
599 m = self.manifest.read(mn)
599 m = self.manifest.read(mn)
600 wlock = self.wlock()
600 wlock = self.wlock()
601 for f in list:
601 for f in list:
602 if self.dirstate.state(f) not in "r":
602 if self.dirstate.state(f) not in "r":
603 self.ui.warn("%s not removed!\n" % f)
603 self.ui.warn("%s not removed!\n" % f)
604 else:
604 else:
605 t = self.file(f).read(m[f])
605 t = self.file(f).read(m[f])
606 self.wwrite(f, t)
606 self.wwrite(f, t)
607 util.set_exec(self.wjoin(f), mf[f])
607 util.set_exec(self.wjoin(f), mf[f])
608 self.dirstate.update([f], "n")
608 self.dirstate.update([f], "n")
609
609
610 def copy(self, source, dest):
610 def copy(self, source, dest):
611 p = self.wjoin(dest)
611 p = self.wjoin(dest)
612 if not os.path.exists(p):
612 if not os.path.exists(p):
613 self.ui.warn(_("%s does not exist!\n") % dest)
613 self.ui.warn(_("%s does not exist!\n") % dest)
614 elif not os.path.isfile(p):
614 elif not os.path.isfile(p):
615 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
615 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
616 else:
616 else:
617 wlock = self.wlock()
617 wlock = self.wlock()
618 if self.dirstate.state(dest) == '?':
618 if self.dirstate.state(dest) == '?':
619 self.dirstate.update([dest], "a")
619 self.dirstate.update([dest], "a")
620 self.dirstate.copy(source, dest)
620 self.dirstate.copy(source, dest)
621
621
622 def heads(self, start=None):
622 def heads(self, start=None):
623 heads = self.changelog.heads(start)
623 heads = self.changelog.heads(start)
624 # sort the output in rev descending order
624 # sort the output in rev descending order
625 heads = [(-self.changelog.rev(h), h) for h in heads]
625 heads = [(-self.changelog.rev(h), h) for h in heads]
626 heads.sort()
626 heads.sort()
627 return [n for (r, n) in heads]
627 return [n for (r, n) in heads]
628
628
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it
        (see the block comment above for the elimination rules)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> {tagged node visible from it: 1}
        merges = []         # worklist of (second parent, found-so-far)
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal queued at a merge's second parent,
                # carrying over the tags found on the way there
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' is not a branch name; skip it
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # when a specific branch is requested, stop walking
                    # past the node carrying that tag
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the merge's second parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized transitive closure of the branches relation
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
735 def branches(self, nodes):
735 def branches(self, nodes):
736 if not nodes: nodes = [self.changelog.tip()]
736 if not nodes: nodes = [self.changelog.tip()]
737 b = []
737 b = []
738 for n in nodes:
738 for n in nodes:
739 t = n
739 t = n
740 while n:
740 while n:
741 p = self.changelog.parents(n)
741 p = self.changelog.parents(n)
742 if p[1] != nullid or p[0] == nullid:
742 if p[1] != nullid or p[0] == nullid:
743 b.append((t, n, p[0], p[1]))
743 b.append((t, n, p[0], p[1]))
744 break
744 break
745 n = p[0]
745 n = p[0]
746 return b
746 return b
747
747
748 def between(self, pairs):
748 def between(self, pairs):
749 r = []
749 r = []
750
750
751 for top, bottom in pairs:
751 for top, bottom in pairs:
752 n, l, i = top, [], 0
752 n, l, i = top, [], 0
753 f = 1
753 f = 1
754
754
755 while n != bottom:
755 while n != bottom:
756 p = self.changelog.parents(n)[0]
756 p = self.changelog.parents(n)[0]
757 if i == f:
757 if i == f:
758 l.append(n)
758 l.append(n)
759 f = f * 2
759 f = f * 2
760 n = p
760 n = p
761 i += 1
761 i += 1
762
762
763 r.append(l)
763 r.append(l)
764
764
765 return r
765 return r
766
766
    def findincoming(self, remote, base=None, heads=None):
        """Return the roots of the changesets we are missing from *remote*,
        or None when we already have every remote head.

        *base*, if given as a dict, is filled in with nodes known to both
        repositories.  *heads* restricts discovery to the given remote
        heads (defaults to remote.heads()).  Discovery talks to the
        remote via branches() and between(), then binary-searches each
        incomplete branch for the first unknown changeset.
        """
        m = self.changelog.nodemap
        search = []       # branches to binary-search: (head, root) pairs
        fetch = {}        # earliest unknown changeset of each branch
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}          # nodes already requested from the remote
        reqcnt = 0        # number of round trips, reported in debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for the next batched request,
                    # avoiding duplicates via rep
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask about the queued nodes in batches of 10
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n") % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # adjacent samples: p is the first unknown node
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the (unknown, known) sub-range
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # the only common node is the null revision
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
887 def findoutgoing(self, remote, base=None, heads=None):
887 def findoutgoing(self, remote, base=None, heads=None):
888 if base == None:
888 if base == None:
889 base = {}
889 base = {}
890 self.findincoming(remote, base, heads)
890 self.findincoming(remote, base, heads)
891
891
892 self.ui.debug(_("common changesets up to ")
892 self.ui.debug(_("common changesets up to ")
893 + " ".join(map(short, base.keys())) + "\n")
893 + " ".join(map(short, base.keys())) + "\n")
894
894
895 remain = dict.fromkeys(self.changelog.nodemap)
895 remain = dict.fromkeys(self.changelog.nodemap)
896
896
897 # prune everything remote has from the tree
897 # prune everything remote has from the tree
898 del remain[nullid]
898 del remain[nullid]
899 remove = base.keys()
899 remove = base.keys()
900 while remove:
900 while remove:
901 n = remove.pop(0)
901 n = remove.pop(0)
902 if n in remain:
902 if n in remain:
903 del remain[n]
903 del remain[n]
904 for p in self.changelog.parents(n):
904 for p in self.changelog.parents(n):
905 remove.append(p)
905 remove.append(p)
906
906
907 # find every node whose parents have been pruned
907 # find every node whose parents have been pruned
908 subset = []
908 subset = []
909 for n in remain:
909 for n in remain:
910 p1, p2 = self.changelog.parents(n)
910 p1, p2 = self.changelog.parents(n)
911 if p1 not in remain and p2 not in remain:
911 if p1 not in remain and p2 not in remain:
912 subset.append(n)
912 subset.append(n)
913
913
914 # this is the set of all roots we have to push
914 # this is the set of all roots we have to push
915 return subset
915 return subset
916
916
917 def pull(self, remote, heads = None):
917 def pull(self, remote, heads = None):
918 lock = self.lock()
918 lock = self.lock()
919
919
920 # if we have an empty repo, fetch everything
920 # if we have an empty repo, fetch everything
921 if self.changelog.tip() == nullid:
921 if self.changelog.tip() == nullid:
922 self.ui.status(_("requesting all changes\n"))
922 self.ui.status(_("requesting all changes\n"))
923 fetch = [nullid]
923 fetch = [nullid]
924 else:
924 else:
925 fetch = self.findincoming(remote)
925 fetch = self.findincoming(remote)
926
926
927 if not fetch:
927 if not fetch:
928 self.ui.status(_("no changes found\n"))
928 self.ui.status(_("no changes found\n"))
929 return 1
929 return 1
930
930
931 if heads is None:
931 if heads is None:
932 cg = remote.changegroup(fetch)
932 cg = remote.changegroup(fetch)
933 else:
933 else:
934 cg = remote.changegroupsubset(fetch, heads)
934 cg = remote.changegroupsubset(fetch, heads)
935 return self.addchangegroup(cg)
935 return self.addchangegroup(cg)
936
936
937 def push(self, remote, force=False):
937 def push(self, remote, force=False):
938 lock = remote.lock()
938 lock = remote.lock()
939
939
940 base = {}
940 base = {}
941 heads = remote.heads()
941 heads = remote.heads()
942 inc = self.findincoming(remote, base, heads)
942 inc = self.findincoming(remote, base, heads)
943 if not force and inc:
943 if not force and inc:
944 self.ui.warn(_("abort: unsynced remote changes!\n"))
944 self.ui.warn(_("abort: unsynced remote changes!\n"))
945 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
945 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
946 return 1
946 return 1
947
947
948 update = self.findoutgoing(remote, base)
948 update = self.findoutgoing(remote, base)
949 if not update:
949 if not update:
950 self.ui.status(_("no changes found\n"))
950 self.ui.status(_("no changes found\n"))
951 return 1
951 return 1
952 elif not force:
952 elif not force:
953 if len(heads) < len(self.changelog.heads()):
953 if len(heads) < len(self.changelog.heads()):
954 self.ui.warn(_("abort: push creates new remote branches!\n"))
954 self.ui.warn(_("abort: push creates new remote branches!\n"))
955 self.ui.status(_("(did you forget to merge?"
955 self.ui.status(_("(did you forget to merge?"
956 " use push -f to force)\n"))
956 " use push -f to force)\n"))
957 return 1
957 return 1
958
958
959 cg = self.changegroup(update)
959 cg = self.changegroup(update)
960 return remote.addchangegroup(cg)
960 return remote.addchangegroup(cg)
961
961
962 def changegroupsubset(self, bases, heads):
962 def changegroupsubset(self, bases, heads):
963 """This function generates a changegroup consisting of all the nodes
963 """This function generates a changegroup consisting of all the nodes
964 that are descendents of any of the bases, and ancestors of any of
964 that are descendents of any of the bases, and ancestors of any of
965 the heads.
965 the heads.
966
966
967 It is fairly complex as determining which filenodes and which
967 It is fairly complex as determining which filenodes and which
968 manifest nodes need to be included for the changeset to be complete
968 manifest nodes need to be included for the changeset to be complete
969 is non-trivial.
969 is non-trivial.
970
970
971 Another wrinkle is doing the reverse, figuring out which changeset in
971 Another wrinkle is doing the reverse, figuring out which changeset in
972 the changegroup a particular filenode or manifestnode belongs to."""
972 the changegroup a particular filenode or manifestnode belongs to."""
973
973
974 # Set up some initial variables
974 # Set up some initial variables
975 # Make it easy to refer to self.changelog
975 # Make it easy to refer to self.changelog
976 cl = self.changelog
976 cl = self.changelog
977 # msng is short for missing - compute the list of changesets in this
977 # msng is short for missing - compute the list of changesets in this
978 # changegroup.
978 # changegroup.
979 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
979 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
980 # Some bases may turn out to be superfluous, and some heads may be
980 # Some bases may turn out to be superfluous, and some heads may be
981 # too. nodesbetween will return the minimal set of bases and heads
981 # too. nodesbetween will return the minimal set of bases and heads
982 # necessary to re-create the changegroup.
982 # necessary to re-create the changegroup.
983
983
984 # Known heads are the list of heads that it is assumed the recipient
984 # Known heads are the list of heads that it is assumed the recipient
985 # of this changegroup will know about.
985 # of this changegroup will know about.
986 knownheads = {}
986 knownheads = {}
987 # We assume that all parents of bases are known heads.
987 # We assume that all parents of bases are known heads.
988 for n in bases:
988 for n in bases:
989 for p in cl.parents(n):
989 for p in cl.parents(n):
990 if p != nullid:
990 if p != nullid:
991 knownheads[p] = 1
991 knownheads[p] = 1
992 knownheads = knownheads.keys()
992 knownheads = knownheads.keys()
993 if knownheads:
993 if knownheads:
994 # Now that we know what heads are known, we can compute which
994 # Now that we know what heads are known, we can compute which
995 # changesets are known. The recipient must know about all
995 # changesets are known. The recipient must know about all
996 # changesets required to reach the known heads from the null
996 # changesets required to reach the known heads from the null
997 # changeset.
997 # changeset.
998 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
998 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
999 junk = None
999 junk = None
1000 # Transform the list into an ersatz set.
1000 # Transform the list into an ersatz set.
1001 has_cl_set = dict.fromkeys(has_cl_set)
1001 has_cl_set = dict.fromkeys(has_cl_set)
1002 else:
1002 else:
1003 # If there were no known heads, the recipient cannot be assumed to
1003 # If there were no known heads, the recipient cannot be assumed to
1004 # know about any changesets.
1004 # know about any changesets.
1005 has_cl_set = {}
1005 has_cl_set = {}
1006
1006
1007 # Make it easy to refer to self.manifest
1007 # Make it easy to refer to self.manifest
1008 mnfst = self.manifest
1008 mnfst = self.manifest
1009 # We don't know which manifests are missing yet
1009 # We don't know which manifests are missing yet
1010 msng_mnfst_set = {}
1010 msng_mnfst_set = {}
1011 # Nor do we know which filenodes are missing.
1011 # Nor do we know which filenodes are missing.
1012 msng_filenode_set = {}
1012 msng_filenode_set = {}
1013
1013
1014 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1014 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1015 junk = None
1015 junk = None
1016
1016
1017 # A changeset always belongs to itself, so the changenode lookup
1017 # A changeset always belongs to itself, so the changenode lookup
1018 # function for a changenode is identity.
1018 # function for a changenode is identity.
1019 def identity(x):
1019 def identity(x):
1020 return x
1020 return x
1021
1021
1022 # A function generating function. Sets up an environment for the
1022 # A function generating function. Sets up an environment for the
1023 # inner function.
1023 # inner function.
1024 def cmp_by_rev_func(revlog):
1024 def cmp_by_rev_func(revlog):
1025 # Compare two nodes by their revision number in the environment's
1025 # Compare two nodes by their revision number in the environment's
1026 # revision history. Since the revision number both represents the
1026 # revision history. Since the revision number both represents the
1027 # most efficient order to read the nodes in, and represents a
1027 # most efficient order to read the nodes in, and represents a
1028 # topological sorting of the nodes, this function is often useful.
1028 # topological sorting of the nodes, this function is often useful.
1029 def cmp_by_rev(a, b):
1029 def cmp_by_rev(a, b):
1030 return cmp(revlog.rev(a), revlog.rev(b))
1030 return cmp(revlog.rev(a), revlog.rev(b))
1031 return cmp_by_rev
1031 return cmp_by_rev
1032
1032
1033 # If we determine that a particular file or manifest node must be a
1033 # If we determine that a particular file or manifest node must be a
1034 # node that the recipient of the changegroup will already have, we can
1034 # node that the recipient of the changegroup will already have, we can
1035 # also assume the recipient will have all the parents. This function
1035 # also assume the recipient will have all the parents. This function
1036 # prunes them from the set of missing nodes.
1036 # prunes them from the set of missing nodes.
1037 def prune_parents(revlog, hasset, msngset):
1037 def prune_parents(revlog, hasset, msngset):
1038 haslst = hasset.keys()
1038 haslst = hasset.keys()
1039 haslst.sort(cmp_by_rev_func(revlog))
1039 haslst.sort(cmp_by_rev_func(revlog))
1040 for node in haslst:
1040 for node in haslst:
1041 parentlst = [p for p in revlog.parents(node) if p != nullid]
1041 parentlst = [p for p in revlog.parents(node) if p != nullid]
1042 while parentlst:
1042 while parentlst:
1043 n = parentlst.pop()
1043 n = parentlst.pop()
1044 if n not in hasset:
1044 if n not in hasset:
1045 hasset[n] = 1
1045 hasset[n] = 1
1046 p = [p for p in revlog.parents(n) if p != nullid]
1046 p = [p for p in revlog.parents(n) if p != nullid]
1047 parentlst.extend(p)
1047 parentlst.extend(p)
1048 for n in hasset:
1048 for n in hasset:
1049 msngset.pop(n, None)
1049 msngset.pop(n, None)
1050
1050
1051 # This is a function generating function used to set up an environment
1051 # This is a function generating function used to set up an environment
1052 # for the inner function to execute in.
1052 # for the inner function to execute in.
1053 def manifest_and_file_collector(changedfileset):
1053 def manifest_and_file_collector(changedfileset):
1054 # This is an information gathering function that gathers
1054 # This is an information gathering function that gathers
1055 # information from each changeset node that goes out as part of
1055 # information from each changeset node that goes out as part of
1056 # the changegroup. The information gathered is a list of which
1056 # the changegroup. The information gathered is a list of which
1057 # manifest nodes are potentially required (the recipient may
1057 # manifest nodes are potentially required (the recipient may
1058 # already have them) and total list of all files which were
1058 # already have them) and total list of all files which were
1059 # changed in any changeset in the changegroup.
1059 # changed in any changeset in the changegroup.
1060 #
1060 #
1061 # We also remember the first changenode we saw any manifest
1061 # We also remember the first changenode we saw any manifest
1062 # referenced by so we can later determine which changenode 'owns'
1062 # referenced by so we can later determine which changenode 'owns'
1063 # the manifest.
1063 # the manifest.
1064 def collect_manifests_and_files(clnode):
1064 def collect_manifests_and_files(clnode):
1065 c = cl.read(clnode)
1065 c = cl.read(clnode)
1066 for f in c[3]:
1066 for f in c[3]:
1067 # This is to make sure we only have one instance of each
1067 # This is to make sure we only have one instance of each
1068 # filename string for each filename.
1068 # filename string for each filename.
1069 changedfileset.setdefault(f, f)
1069 changedfileset.setdefault(f, f)
1070 msng_mnfst_set.setdefault(c[0], clnode)
1070 msng_mnfst_set.setdefault(c[0], clnode)
1071 return collect_manifests_and_files
1071 return collect_manifests_and_files
1072
1072
1073 # Figure out which manifest nodes (of the ones we think might be part
1073 # Figure out which manifest nodes (of the ones we think might be part
1074 # of the changegroup) the recipient must know about and remove them
1074 # of the changegroup) the recipient must know about and remove them
1075 # from the changegroup.
1075 # from the changegroup.
1076 def prune_manifests():
1076 def prune_manifests():
1077 has_mnfst_set = {}
1077 has_mnfst_set = {}
1078 for n in msng_mnfst_set:
1078 for n in msng_mnfst_set:
1079 # If a 'missing' manifest thinks it belongs to a changenode
1079 # If a 'missing' manifest thinks it belongs to a changenode
1080 # the recipient is assumed to have, obviously the recipient
1080 # the recipient is assumed to have, obviously the recipient
1081 # must have that manifest.
1081 # must have that manifest.
1082 linknode = cl.node(mnfst.linkrev(n))
1082 linknode = cl.node(mnfst.linkrev(n))
1083 if linknode in has_cl_set:
1083 if linknode in has_cl_set:
1084 has_mnfst_set[n] = 1
1084 has_mnfst_set[n] = 1
1085 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1085 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1086
1086
1087 # Use the information collected in collect_manifests_and_files to say
1087 # Use the information collected in collect_manifests_and_files to say
1088 # which changenode any manifestnode belongs to.
1088 # which changenode any manifestnode belongs to.
1089 def lookup_manifest_link(mnfstnode):
1089 def lookup_manifest_link(mnfstnode):
1090 return msng_mnfst_set[mnfstnode]
1090 return msng_mnfst_set[mnfstnode]
1091
1091
# A function generating function that sets up the initial environment
# for the inner function.
def filenode_collector(changedfiles):
    """Return a callback for mnfst.group() that records, for each
    manifest node streamed, which filenodes it references.

    ``changedfiles`` maps file names we care about to themselves (a
    lookup table).  Results accumulate in the enclosing scope's
    ``msng_filenode_set``.
    """
    # Mutable cell: the manifest revision we expect to see next, so we
    # can use a cheap delta instead of a full manifest read.
    next_rev = [0]
    # This gathers information from each manifestnode included in the
    # changegroup about which filenodes the manifest node references
    # so we can include those in the changegroup too.
    #
    # It also remembers which changenode each filenode belongs to.  It
    # does this by assuming that a filenode belongs to the changenode
    # that the first manifest referencing it belongs to.
    def collect_msng_filenodes(mnfstnode):
        r = mnfst.rev(mnfstnode)
        if r == next_rev[0]:
            # If the last rev we looked at was the one just previous,
            # we only need to see a diff.
            delta = mdiff.patchtext(mnfst.delta(mnfstnode))
            # For each line in the delta
            for dline in delta.splitlines():
                # get the filename and filenode for that line
                # (manifest lines are "name\0hexnode").
                f, fnode = dline.split('\0')
                fnode = bin(fnode[:40])
                f = changedfiles.get(f, None)
                # And if the file is in the list of files we care
                # about.
                if f is not None:
                    # Get the changenode this manifest belongs to
                    clnode = msng_mnfst_set[mnfstnode]
                    # Create the set of filenodes for the file if
                    # there isn't one already.
                    ndset = msng_filenode_set.setdefault(f, {})
                    # And set the filenode's changelog node to the
                    # manifest's if it hasn't been set already.
                    ndset.setdefault(fnode, clnode)
        else:
            # Otherwise we need a full manifest.
            m = mnfst.read(mnfstnode)
            # For every file we care about.
            for f in changedfiles:
                fnode = m.get(f, None)
                # If it's in the manifest
                if fnode is not None:
                    # See comments above.
                    clnode = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(f, {})
                    ndset.setdefault(fnode, clnode)
        # Remember the revision we hope to see next.
        next_rev[0] = r + 1
    return collect_msng_filenodes
1141
1141
# We have a list of filenodes we think we need for a file; remove
# all those the recipient is already known to have.
def prune_filenodes(f, filerevlog):
    """Drop from the missing-filenode set of ``f`` every node the
    recipient must already possess (and their ancestors)."""
    candidates = msng_filenode_set[f]
    known = {}
    # If a 'missing' filenode is linked to a changenode the recipient
    # is assumed to have, the recipient necessarily has that filenode
    # as well.
    for fnode in candidates:
        owner = cl.node(filerevlog.linkrev(fnode))
        if owner in has_cl_set:
            known[fnode] = 1
    prune_parents(filerevlog, known, candidates)
1155
1155
# A function-generator function that sets up the context for the
# inner function.
def lookup_filenode_link_func(fname):
    """Return a lookup callable mapping a filenode of ``fname`` to the
    changenode it belongs to."""
    # Bind the per-file filenode->changenode table once, outside the
    # returned closure.
    filenode_map = msng_filenode_set[fname]

    def lookup_filenode_link(fnode):
        # Resolve the changenode recorded for this filenode.
        return filenode_map[fnode]

    return lookup_filenode_link
1164
1164
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
    """Yield the changegroup stream: changelog chunks, then manifest
    chunks, then one named group per changed file, terminated by an
    empty chunk.  The ordering is the wire format — do not reorder."""
    # The set of changed files starts empty.
    changedfiles = {}
    # Create a changenode group generator that will call our functions
    # back to lookup the owning changenode and collect information.
    group = cl.group(msng_cl_lst, identity,
                     manifest_and_file_collector(changedfiles))
    for chnk in group:
        yield chnk

    # The list of manifests has been collected by the generator
    # calling our functions back.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Sort the manifestnodes by revision number.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # Create a generator for the manifestnodes that calls our lookup
    # and data collection functions back.
    group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                        filenode_collector(changedfiles))
    for chnk in group:
        yield chnk

    # These are no longer needed, dereference and toss the memory for
    # them.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # Go through all our files in order sorted by name.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.
        prune_filenodes(fname, filerevlog)
        msng_filenode_lst = msng_filenode_set[fname].keys()
        # If any filenodes are left, generate the group for them,
        # otherwise don't bother.
        if len(msng_filenode_lst) > 0:
            # File groups are introduced by a chunk holding the name.
            yield struct.pack(">l", len(fname) + 4) + fname
            # Sort the filenodes by their revision #
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Create a group generator and only pass in a changenode
            # lookup function as we need to collect no information
            # from filenodes.
            group = filerevlog.group(msng_filenode_lst,
                                     lookup_filenode_link_func(fname))
            for chnk in group:
                yield chnk
        # Don't need this anymore, toss it to free memory.
        del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield struct.pack(">l", 0)
1221
1221
1222 return util.chunkbuffer(gengroup())
1222 return util.chunkbuffer(gengroup())
1223
1223
def changegroup(self, basenodes):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    ``basenodes`` are the changelog nodes the recipient already has;
    everything descending from them is sent.  Returns a chunkbuffer
    over the generated stream.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them."""
    cl = self.changelog
    # Every changeset between the bases and the heads is outgoing.
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Set of outgoing changelog revision numbers, for fast membership.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])

    def identity(x):
        # Changelog nodes are their own link nodes.
        return x

    def gennodelst(revlog):
        # Yield nodes of `revlog` whose linked changeset is outgoing.
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    def changed_file_collector(changedfileset):
        # Side-effect callback: while streaming the changelog, record
        # every file name touched by an outgoing changeset.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node of `revlog` back to its owning changelog node.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        # Changelog chunks first; the collector fills changedfiles as
        # a side effect of the traversal.
        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest chunks.
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        # Then one group per changed file, in name order, each
        # introduced by a chunk carrying the file name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                yield struct.pack(">l", len(fname) + 4) + fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # Empty chunk terminates the stream.
        yield struct.pack(">l", 0)

    return util.chunkbuffer(gengroup())
1283
1283
def addchangegroup(self, source):
    """Apply a changegroup read from the file-like ``source``.

    Adds changesets, then manifests, then file revisions, all inside
    one transaction, and fires the "changegroup" and "commit" hooks.
    Returns 1 if the changegroup hook fails, otherwise None."""

    def getchunk():
        # Read one length-prefixed chunk; "" signals end of a group.
        d = source.read(4)
        if not d: return ""
        l = struct.unpack(">l", d)[0]
        # The 4-byte length prefix counts itself; <= 4 is a terminator.
        if l <= 4: return ""
        d = source.read(l - 4)
        if len(d) < l - 4:
            raise repo.RepoError(_("premature EOF reading chunk"
                                   " (got %d bytes, expected %d)")
                                 % (len(d), l - 4))
        return d

    def getgroup():
        # Iterate chunks until the empty terminator chunk.
        while 1:
            c = getchunk()
            if not c: break
            yield c

    def csmap(x):
        # Link-rev mapper for changelog chunks: next changelog rev.
        self.ui.debug(_("add changeset %s\n") % short(x))
        return self.changelog.count()

    def revmap(x):
        # Link-rev mapper for manifest/file chunks.
        return self.changelog.rev(x)

    if not source: return
    changesets = files = revisions = 0

    tr = self.transaction()

    # Head count before the pull, to report " (+N heads)" afterwards.
    oldheads = len(self.changelog.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    co = self.changelog.tip()
    cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
    cnr, cor = map(self.changelog.rev, (cn, co))
    if cn == nullid:
        # Empty group: nothing was added.
        cnr = cor
    changesets = cnr - cor

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    # NOTE(review): mm and mo are assigned but never read below.
    mm = self.manifest.tip()
    mo = self.manifest.addgroup(getgroup(), revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while 1:
        f = getchunk()
        if not f: break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()
        n = fl.addgroup(getgroup(), revmap, tr)
        revisions += fl.count() - o
        files += 1

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads > oldheads:
        heads = _(" (+%d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    tr.close()

    if changesets > 0:
        if not self.hook("changegroup",
                         node=hex(self.changelog.node(cor+1))):
            self.ui.warn(_("abort: changegroup hook returned failure!\n"))
            return 1

        # Fire the commit hook once per newly added changeset.
        for i in range(cor + 1, cnr + 1):
            self.hook("commit", node=hex(self.changelog.node(i)))

    return
1365
1365
def update(self, node, allow=False, force=False, choose=None,
           moddirstate=True, forcemerge=False):
    """Update the working directory to changeset ``node``.

    allow      -- permit a cross-branch merge (otherwise abort on one)
    force      -- clobber local changes / allow clobbering updates
    choose     -- optional predicate limiting which files are touched
    moddirstate -- when False, don't record changes in the dirstate
    forcemerge -- skip the uncommitted-changes pre-checks

    Returns 1 on refusal (uncommitted merge outstanding, or branch
    span without ``allow``), otherwise None."""
    pl = self.dirstate.parents()
    if not force and pl[1] != nullid:
        self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
        return 1

    p1, p2 = pl[0], node
    pa = self.changelog.ancestor(p1, p2)
    m1n = self.changelog.read(p1)[0]
    m2n = self.changelog.read(p2)[0]
    man = self.manifest.ancestor(m1n, m2n)
    m1 = self.manifest.read(m1n)
    mf1 = self.manifest.readflags(m1n)
    m2 = self.manifest.read(m2n)
    mf2 = self.manifest.readflags(m2n)
    ma = self.manifest.read(man)
    mfa = self.manifest.readflags(man)

    # (changed, added, deleted, unknown) in the working directory.
    (c, a, d, u) = self.changes()

    if allow and not forcemerge:
        if c or a or d:
            raise util.Abort(_("outstanding uncommited changes"))
    if not forcemerge and not force:
        # Refuse to clobber an unknown working-dir file that differs
        # from the incoming revision's copy.
        for f in u:
            if f in m2:
                t1 = self.wread(f)
                t2 = self.file(f).read(m2[f])
                if cmp(t1, t2) != 0:
                    raise util.Abort(_("'%s' already exists in the working"
                                       " dir and differs from remote") % f)

    # is this a jump, or a merge?  i.e. is there a linear path
    # from p1 to p2?
    linear_path = (pa == p1 or pa == p2)

    # resolve the manifest to determine which files
    # we care about merging
    self.ui.note(_("resolving manifests\n"))
    self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                  (force, allow, moddirstate, linear_path))
    self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                  (short(man), short(m1n), short(m2n)))

    merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
    get = {}     # files to fetch from the target: f -> filenode
    remove = []  # files to delete from the working dir

    # construct a working dir manifest
    mw = m1.copy()
    mfw = mf1.copy()
    umap = dict.fromkeys(u)

    for f in a + c + u:
        mw[f] = ""
        mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

    if moddirstate:
        wlock = self.wlock()

    for f in d:
        if f in mw: del mw[f]

        # If we're jumping between revisions (as opposed to merging),
        # and if neither the working directory nor the target rev has
        # the file, then we need to remove it from the dirstate, to
        # prevent the dirstate from listing the file when it is no
        # longer in the manifest.
        if moddirstate and linear_path and f not in m2:
            self.dirstate.forget((f,))

    # Compare manifests
    for f, n in mw.iteritems():
        if choose and not choose(f): continue
        if f in m2:
            s = 0

            # is the wfile new since m1, and match m2?
            if f not in m1:
                t1 = self.wread(f)
                t2 = self.file(f).read(m2[f])
                if cmp(t1, t2) == 0:
                    n = m2[f]
                del t1, t2

            # are files different?
            if n != m2[f]:
                a = ma.get(f, nullid)
                # are both different from the ancestor?
                if n != a and m2[f] != a:
                    self.ui.debug(_(" %s versions differ, resolve\n") % f)
                    # merge executable bits
                    # "if we changed or they changed, change in merge"
                    a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                    mode = ((a^b) | (a^c)) ^ a
                    merge[f] = (m1.get(f, nullid), m2[f], mode)
                    s = 1
                # are we clobbering?
                # is remote's version newer?
                # or are we going back in time?
                elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                    self.ui.debug(_(" remote %s is newer, get\n") % f)
                    get[f] = m2[f]
                    s = 1
            elif f in umap:
                # this unknown file is the same as the checkout
                get[f] = m2[f]

            if not s and mfw[f] != mf2[f]:
                if force:
                    self.ui.debug(_(" updating permissions for %s\n") % f)
                    util.set_exec(self.wjoin(f), mf2[f])
                else:
                    # Merge the executable bits like file contents above.
                    a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                    mode = ((a^b) | (a^c)) ^ a
                    if mode != b:
                        self.ui.debug(_(" updating permissions for %s\n")
                                      % f)
                        util.set_exec(self.wjoin(f), mode)
            del m2[f]
        elif f in ma:
            if n != ma[f]:
                r = _("d")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_(" local changed %s which remote deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("d"):
                    remove.append(f)
            else:
                self.ui.debug(_("other deleted %s\n") % f)
                remove.append(f) # other deleted it
        else:
            # file is created on branch or in working directory
            if force and f not in umap:
                self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                remove.append(f)
            elif n == m1.get(f, nullid): # same as parent
                if p2 == pa: # going backwards?
                    self.ui.debug(_("remote deleted %s\n") % f)
                    remove.append(f)
                else:
                    self.ui.debug(_("local modified %s, keeping\n") % f)
            else:
                self.ui.debug(_("working dir created %s, keeping\n") % f)

    # Files present only in the target revision (m2 entries matched
    # above were deleted from m2 as they were handled).
    for f, n in m2.iteritems():
        if choose and not choose(f): continue
        if f[0] == "/": continue
        if f in ma and n != ma[f]:
            r = _("k")
            if not force and (linear_path or allow):
                r = self.ui.prompt(
                    (_("remote changed %s which local deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
            if r == _("k"): get[f] = n
        elif f not in ma:
            self.ui.debug(_("remote created %s\n") % f)
            get[f] = n
        else:
            if force or p2 == pa: # going backwards?
                self.ui.debug(_("local deleted %s, recreating\n") % f)
                get[f] = n
            else:
                self.ui.debug(_("local deleted %s\n") % f)

    del mw, m1, m2, ma

    if force:
        # Forced update: take the remote side of every merge outright.
        for f in merge:
            get[f] = merge[f][1]
        merge = {}

    if linear_path or force:
        # we don't need to do any magic, just jump to the new rev
        branch_merge = False
        p1, p2 = p2, nullid
    else:
        if not allow:
            self.ui.status(_("this update spans a branch"
                             " affecting the following files:\n"))
            fl = merge.keys() + get.keys()
            fl.sort()
            for f in fl:
                cf = ""
                if f in merge: cf = _(" (resolve)")
                self.ui.status(" %s%s\n" % (f, cf))
            self.ui.warn(_("aborting update spanning branches!\n"))
            self.ui.status(_("(use update -m to merge across branches"
                             " or -C to lose changes)\n"))
            return 1
        branch_merge = True

    # get the files we don't need to change
    files = get.keys()
    files.sort()
    for f in files:
        if f[0] == "/": continue
        self.ui.note(_("getting %s\n") % f)
        t = self.file(f).read(get[f])
        self.wwrite(f, t)
        util.set_exec(self.wjoin(f), mf2[f])
        if moddirstate:
            if branch_merge:
                self.dirstate.update([f], 'n', st_mtime=-1)
            else:
                self.dirstate.update([f], 'n')

    # merge the tricky bits
    files = merge.keys()
    files.sort()
    for f in files:
        self.ui.status(_("merging %s\n") % f)
        my, other, flag = merge[f]
        self.merge3(f, my, other)
        util.set_exec(self.wjoin(f), flag)
        if moddirstate:
            if branch_merge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                self.dirstate.update([f], 'm')
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                f_len = len(self.file(f).read(other))
                self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

    remove.sort()
    for f in remove:
        self.ui.note(_("removing %s\n") % f)
        try:
            util.unlink(self.wjoin(f))
        except OSError, inst:
            # A file already gone is fine; report anything else.
            if inst.errno != errno.ENOENT:
                self.ui.warn(_("update failed to remove %s: %s!\n") %
                             (f, inst.strerror))
    if moddirstate:
        if branch_merge:
            self.dirstate.update(remove, 'r')
        else:
            self.dirstate.forget(remove)

    if moddirstate:
        self.dirstate.setparents(p1, p2)
1613
1613
def merge3(self, fn, my, other):
    """Perform a 3-way merge of ``fn`` in the working directory.

    ``my`` and ``other`` are the two filenodes to merge; their common
    ancestor is computed from the filelog.  The external merge tool
    ($HGMERGE, ui.merge config, or "hgmerge") is run on the working
    copy plus two temporary files, which are always cleaned up —
    previously they leaked if writing them or running the tool raised."""

    def temp(prefix, node):
        # Write revision `node` of fn to a fresh temporary file and
        # return its path; the caller is responsible for unlinking it.
        pre = "%s~%s." % (os.path.basename(fn), prefix)
        (fd, name) = tempfile.mkstemp("", pre)
        f = os.fdopen(fd, "wb")
        self.wwrite(fn, fl.read(node), f)
        f.close()
        return name

    fl = self.file(fn)
    base = fl.ancestor(my, other)
    a = self.wjoin(fn)
    b = temp("base", base)
    try:
        c = temp("other", other)
        try:
            self.ui.note(_("resolving %s\n") % fn)
            self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                          (fn, short(my), short(other), short(base)))

            cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
                   or "hgmerge")
            # NOTE(review): the command line is built as a shell string;
            # file names containing quotes could break or inject here.
            r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
            if r:
                self.ui.warn(_("merging %s failed!\n") % fn)
        finally:
            os.unlink(c)
    finally:
        os.unlink(b)
1643
1643
1644 def verify(self):
1644 def verify(self):
1645 filelinkrevs = {}
1645 filelinkrevs = {}
1646 filenodes = {}
1646 filenodes = {}
1647 changesets = revisions = files = 0
1647 changesets = revisions = files = 0
1648 errors = [0]
1648 errors = [0]
1649 neededmanifests = {}
1649 neededmanifests = {}
1650
1650
1651 def err(msg):
1651 def err(msg):
1652 self.ui.warn(msg + "\n")
1652 self.ui.warn(msg + "\n")
1653 errors[0] += 1
1653 errors[0] += 1
1654
1654
1655 seen = {}
1655 seen = {}
1656 self.ui.status(_("checking changesets\n"))
1656 self.ui.status(_("checking changesets\n"))
1657 d = self.changelog.checksize()
1657 d = self.changelog.checksize()
1658 if d:
1658 if d:
1659 err(_("changeset data short %d bytes") % d)
1659 err(_("changeset data short %d bytes") % d)
1660 for i in range(self.changelog.count()):
1660 for i in range(self.changelog.count()):
1661 changesets += 1
1661 changesets += 1
1662 n = self.changelog.node(i)
1662 n = self.changelog.node(i)
1663 l = self.changelog.linkrev(n)
1663 l = self.changelog.linkrev(n)
1664 if l != i:
1664 if l != i:
1665 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1665 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1666 if n in seen:
1666 if n in seen:
1667 err(_("duplicate changeset at revision %d") % i)
1667 err(_("duplicate changeset at revision %d") % i)
1668 seen[n] = 1
1668 seen[n] = 1
1669
1669
1670 for p in self.changelog.parents(n):
1670 for p in self.changelog.parents(n):
1671 if p not in self.changelog.nodemap:
1671 if p not in self.changelog.nodemap:
1672 err(_("changeset %s has unknown parent %s") %
1672 err(_("changeset %s has unknown parent %s") %
1673 (short(n), short(p)))
1673 (short(n), short(p)))
1674 try:
1674 try:
1675 changes = self.changelog.read(n)
1675 changes = self.changelog.read(n)
1676 except KeyboardInterrupt:
1676 except KeyboardInterrupt:
1677 self.ui.warn(_("interrupted"))
1677 self.ui.warn(_("interrupted"))
1678 raise
1678 raise
1679 except Exception, inst:
1679 except Exception, inst:
1680 err(_("unpacking changeset %s: %s") % (short(n), inst))
1680 err(_("unpacking changeset %s: %s") % (short(n), inst))
1681
1681
1682 neededmanifests[changes[0]] = n
1682 neededmanifests[changes[0]] = n
1683
1683
1684 for f in changes[3]:
1684 for f in changes[3]:
1685 filelinkrevs.setdefault(f, []).append(i)
1685 filelinkrevs.setdefault(f, []).append(i)
1686
1686
1687 seen = {}
1687 seen = {}
1688 self.ui.status(_("checking manifests\n"))
1688 self.ui.status(_("checking manifests\n"))
1689 d = self.manifest.checksize()
1689 d = self.manifest.checksize()
1690 if d:
1690 if d:
1691 err(_("manifest data short %d bytes") % d)
1691 err(_("manifest data short %d bytes") % d)
1692 for i in range(self.manifest.count()):
1692 for i in range(self.manifest.count()):
1693 n = self.manifest.node(i)
1693 n = self.manifest.node(i)
1694 l = self.manifest.linkrev(n)
1694 l = self.manifest.linkrev(n)
1695
1695
1696 if l < 0 or l >= self.changelog.count():
1696 if l < 0 or l >= self.changelog.count():
1697 err(_("bad manifest link (%d) at revision %d") % (l, i))
1697 err(_("bad manifest link (%d) at revision %d") % (l, i))
1698
1698
1699 if n in neededmanifests:
1699 if n in neededmanifests:
1700 del neededmanifests[n]
1700 del neededmanifests[n]
1701
1701
1702 if n in seen:
1702 if n in seen:
1703 err(_("duplicate manifest at revision %d") % i)
1703 err(_("duplicate manifest at revision %d") % i)
1704
1704
1705 seen[n] = 1
1705 seen[n] = 1
1706
1706
1707 for p in self.manifest.parents(n):
1707 for p in self.manifest.parents(n):
1708 if p not in self.manifest.nodemap:
1708 if p not in self.manifest.nodemap:
1709 err(_("manifest %s has unknown parent %s") %
1709 err(_("manifest %s has unknown parent %s") %
1710 (short(n), short(p)))
1710 (short(n), short(p)))
1711
1711
1712 try:
1712 try:
1713 delta = mdiff.patchtext(self.manifest.delta(n))
1713 delta = mdiff.patchtext(self.manifest.delta(n))
1714 except KeyboardInterrupt:
1714 except KeyboardInterrupt:
1715 self.ui.warn(_("interrupted"))
1715 self.ui.warn(_("interrupted"))
1716 raise
1716 raise
1717 except Exception, inst:
1717 except Exception, inst:
1718 err(_("unpacking manifest %s: %s") % (short(n), inst))
1718 err(_("unpacking manifest %s: %s") % (short(n), inst))
1719
1719
1720 ff = [ l.split('\0') for l in delta.splitlines() ]
1720 ff = [ l.split('\0') for l in delta.splitlines() ]
1721 for f, fn in ff:
1721 for f, fn in ff:
1722 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1722 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1723
1723
1724 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1724 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1725
1725
1726 for m,c in neededmanifests.items():
1726 for m,c in neededmanifests.items():
1727 err(_("Changeset %s refers to unknown manifest %s") %
1727 err(_("Changeset %s refers to unknown manifest %s") %
1728 (short(m), short(c)))
1728 (short(m), short(c)))
1729 del neededmanifests
1729 del neededmanifests
1730
1730
1731 for f in filenodes:
1731 for f in filenodes:
1732 if f not in filelinkrevs:
1732 if f not in filelinkrevs:
1733 err(_("file %s in manifest but not in changesets") % f)
1733 err(_("file %s in manifest but not in changesets") % f)
1734
1734
1735 for f in filelinkrevs:
1735 for f in filelinkrevs:
1736 if f not in filenodes:
1736 if f not in filenodes:
1737 err(_("file %s in changeset but not in manifest") % f)
1737 err(_("file %s in changeset but not in manifest") % f)
1738
1738
1739 self.ui.status(_("checking files\n"))
1739 self.ui.status(_("checking files\n"))
1740 ff = filenodes.keys()
1740 ff = filenodes.keys()
1741 ff.sort()
1741 ff.sort()
1742 for f in ff:
1742 for f in ff:
1743 if f == "/dev/null": continue
1743 if f == "/dev/null": continue
1744 files += 1
1744 files += 1
1745 fl = self.file(f)
1745 fl = self.file(f)
1746 d = fl.checksize()
1746 d = fl.checksize()
1747 if d:
1747 if d:
1748 err(_("%s file data short %d bytes") % (f, d))
1748 err(_("%s file data short %d bytes") % (f, d))
1749
1749
1750 nodes = { nullid: 1 }
1750 nodes = { nullid: 1 }
1751 seen = {}
1751 seen = {}
1752 for i in range(fl.count()):
1752 for i in range(fl.count()):
1753 revisions += 1
1753 revisions += 1
1754 n = fl.node(i)
1754 n = fl.node(i)
1755
1755
1756 if n in seen:
1756 if n in seen:
1757 err(_("%s: duplicate revision %d") % (f, i))
1757 err(_("%s: duplicate revision %d") % (f, i))
1758 if n not in filenodes[f]:
1758 if n not in filenodes[f]:
1759 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1759 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1760 else:
1760 else:
1761 del filenodes[f][n]
1761 del filenodes[f][n]
1762
1762
1763 flr = fl.linkrev(n)
1763 flr = fl.linkrev(n)
1764 if flr not in filelinkrevs[f]:
1764 if flr not in filelinkrevs[f]:
1765 err(_("%s:%s points to unexpected changeset %d")
1765 err(_("%s:%s points to unexpected changeset %d")
1766 % (f, short(n), flr))
1766 % (f, short(n), flr))
1767 else:
1767 else:
1768 filelinkrevs[f].remove(flr)
1768 filelinkrevs[f].remove(flr)
1769
1769
1770 # verify contents
1770 # verify contents
1771 try:
1771 try:
1772 t = fl.read(n)
1772 t = fl.read(n)
1773 except KeyboardInterrupt:
1773 except KeyboardInterrupt:
1774 self.ui.warn(_("interrupted"))
1774 self.ui.warn(_("interrupted"))
1775 raise
1775 raise
1776 except Exception, inst:
1776 except Exception, inst:
1777 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1777 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1778
1778
1779 # verify parents
1779 # verify parents
1780 (p1, p2) = fl.parents(n)
1780 (p1, p2) = fl.parents(n)
1781 if p1 not in nodes:
1781 if p1 not in nodes:
1782 err(_("file %s:%s unknown parent 1 %s") %
1782 err(_("file %s:%s unknown parent 1 %s") %
1783 (f, short(n), short(p1)))
1783 (f, short(n), short(p1)))
1784 if p2 not in nodes:
1784 if p2 not in nodes:
1785 err(_("file %s:%s unknown parent 2 %s") %
1785 err(_("file %s:%s unknown parent 2 %s") %
1786 (f, short(n), short(p1)))
1786 (f, short(n), short(p1)))
1787 nodes[n] = 1
1787 nodes[n] = 1
1788
1788
1789 # cross-check
1789 # cross-check
1790 for node in filenodes[f]:
1790 for node in filenodes[f]:
1791 err(_("node %s in manifests not in %s") % (hex(node), f))
1791 err(_("node %s in manifests not in %s") % (hex(node), f))
1792
1792
1793 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1793 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1794 (files, changesets, revisions))
1794 (files, changesets, revisions))
1795
1795
1796 if errors[0]:
1796 if errors[0]:
1797 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1797 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1798 return 1
1798 return 1
@@ -1,12 +1,12 b''
1 abort: repository a/.hg not found!
1 abort: repository a not found!
2 255
2 255
3 requesting all changes
3 requesting all changes
4 abort: error: Connection refused
4 abort: error: Connection refused
5 255
5 255
6 abort: repository a/.hg not found!
6 abort: repository a not found!
7 255
7 255
8 abort: destination '../a' already exists
8 abort: destination '../a' already exists
9 1
9 1
10 abort: repository a/.hg not found!
10 abort: repository a not found!
11 255
11 255
12 abort: destination 'q' already exists
12 abort: destination 'q' already exists
General Comments 0
You need to be logged in to leave comments. Login now