##// END OF EJS Templates
remove appendfile for the manifest when adding a changegroup...
Benoit Boissinot -
r2395:8ed45fb1 default
parent child Browse files
Show More
@@ -1,2122 +1,2109
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog")
15 demandload(globals(), "revlog")
16
16
17 class localrepository(object):
17 class localrepository(object):
    def __del__(self):
        # Drop the transaction handle when the repository object is
        # garbage-collected, presumably so a pending transaction object
        # is not kept alive by this instance — TODO confirm intent.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        # Locate the repository root: when no path is given, walk up
        # from the current directory until a ".hg" directory is found.
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository configuration is optional; ignore a missing hgrc
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # revlog format version and flags come from the revlog config opts
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches, filled on first use
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80
80
    def hook(self, name, throw=False, **args):
        """Run all configured hooks whose name (before any '.') matches
        *name*; return the or-ed result of the individual hooks.

        When throw is true a failing hook raises util.Abort instead of
        only emitting a warning."""
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted path down to the final callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: hook keyword args are exported as HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # collect both "name" and "name.suffix" entries, sorted by key
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
157
157
158 def tags(self):
158 def tags(self):
159 '''return a mapping of tag to node'''
159 '''return a mapping of tag to node'''
160 if not self.tagscache:
160 if not self.tagscache:
161 self.tagscache = {}
161 self.tagscache = {}
162
162
163 def parsetag(line, context):
163 def parsetag(line, context):
164 if not line:
164 if not line:
165 return
165 return
166 s = l.split(" ", 1)
166 s = l.split(" ", 1)
167 if len(s) != 2:
167 if len(s) != 2:
168 self.ui.warn(_("%s: cannot parse entry\n") % context)
168 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 return
169 return
170 node, key = s
170 node, key = s
171 key = key.strip()
171 key = key.strip()
172 try:
172 try:
173 bin_n = bin(node)
173 bin_n = bin(node)
174 except TypeError:
174 except TypeError:
175 self.ui.warn(_("%s: node '%s' is not well formed\n") %
175 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 (context, node))
176 (context, node))
177 return
177 return
178 if bin_n not in self.changelog.nodemap:
178 if bin_n not in self.changelog.nodemap:
179 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
179 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 (context, key))
180 (context, key))
181 return
181 return
182 self.tagscache[key] = bin_n
182 self.tagscache[key] = bin_n
183
183
184 # read the tags file from each head, ending with the tip,
184 # read the tags file from each head, ending with the tip,
185 # and add each tag found to the map, with "newer" ones
185 # and add each tag found to the map, with "newer" ones
186 # taking precedence
186 # taking precedence
187 heads = self.heads()
187 heads = self.heads()
188 heads.reverse()
188 heads.reverse()
189 fl = self.file(".hgtags")
189 fl = self.file(".hgtags")
190 for node in heads:
190 for node in heads:
191 change = self.changelog.read(node)
191 change = self.changelog.read(node)
192 rev = self.changelog.rev(node)
192 rev = self.changelog.rev(node)
193 fn, ff = self.manifest.find(change[0], '.hgtags')
193 fn, ff = self.manifest.find(change[0], '.hgtags')
194 if fn is None: continue
194 if fn is None: continue
195 count = 0
195 count = 0
196 for l in fl.read(fn).splitlines():
196 for l in fl.read(fn).splitlines():
197 count += 1
197 count += 1
198 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
198 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 (rev, short(node), count))
199 (rev, short(node), count))
200 try:
200 try:
201 f = self.opener("localtags")
201 f = self.opener("localtags")
202 count = 0
202 count = 0
203 for l in f:
203 for l in f:
204 count += 1
204 count += 1
205 parsetag(l, _("localtags, line %d") % count)
205 parsetag(l, _("localtags, line %d") % count)
206 except IOError:
206 except IOError:
207 pass
207 pass
208
208
209 self.tagscache['tip'] = self.changelog.tip()
209 self.tagscache['tip'] = self.changelog.tip()
210
210
211 return self.tagscache
211 return self.tagscache
212
212
213 def tagslist(self):
213 def tagslist(self):
214 '''return a list of tags ordered by revision'''
214 '''return a list of tags ordered by revision'''
215 l = []
215 l = []
216 for t, n in self.tags().items():
216 for t, n in self.tags().items():
217 try:
217 try:
218 r = self.changelog.rev(n)
218 r = self.changelog.rev(n)
219 except:
219 except:
220 r = -2 # sort to the beginning of the list if unknown
220 r = -2 # sort to the beginning of the list if unknown
221 l.append((r, t, n))
221 l.append((r, t, n))
222 l.sort()
222 l.sort()
223 return [(t, n) for r, t, n in l]
223 return [(t, n) for r, t, n in l]
224
224
225 def nodetags(self, node):
225 def nodetags(self, node):
226 '''return the tags associated with a node'''
226 '''return the tags associated with a node'''
227 if not self.nodetagscache:
227 if not self.nodetagscache:
228 self.nodetagscache = {}
228 self.nodetagscache = {}
229 for t, n in self.tags().items():
229 for t, n in self.tags().items():
230 self.nodetagscache.setdefault(n, []).append(t)
230 self.nodetagscache.setdefault(n, []).append(t)
231 return self.nodetagscache.get(node, [])
231 return self.nodetagscache.get(node, [])
232
232
233 def lookup(self, key):
233 def lookup(self, key):
234 try:
234 try:
235 return self.tags()[key]
235 return self.tags()[key]
236 except KeyError:
236 except KeyError:
237 try:
237 try:
238 return self.changelog.lookup(key)
238 return self.changelog.lookup(key)
239 except:
239 except:
240 raise repo.RepoError(_("unknown revision '%s'") % key)
240 raise repo.RepoError(_("unknown revision '%s'") % key)
241
241
242 def dev(self):
242 def dev(self):
243 return os.stat(self.path).st_dev
243 return os.stat(self.path).st_dev
244
244
    def local(self):
        # this repository class is always local (as opposed to a remote
        # repository proxy) — part of the repository interface
        return True
247
247
248 def join(self, f):
248 def join(self, f):
249 return os.path.join(self.path, f)
249 return os.path.join(self.path, f)
250
250
251 def wjoin(self, f):
251 def wjoin(self, f):
252 return os.path.join(self.root, f)
252 return os.path.join(self.root, f)
253
253
254 def file(self, f):
254 def file(self, f):
255 if f[0] == '/':
255 if f[0] == '/':
256 f = f[1:]
256 f = f[1:]
257 return filelog.filelog(self.opener, f, self.revlogversion)
257 return filelog.filelog(self.opener, f, self.revlogversion)
258
258
    def getcwd(self):
        # current working directory, as tracked by the dirstate
        # (delegates entirely to dirstate.getcwd)
        return self.dirstate.getcwd()
261
261
    def wfile(self, f, mode='r'):
        # open file f relative to the working directory root
        return self.wopener(f, mode)
264
264
265 def wread(self, filename):
265 def wread(self, filename):
266 if self.encodepats == None:
266 if self.encodepats == None:
267 l = []
267 l = []
268 for pat, cmd in self.ui.configitems("encode"):
268 for pat, cmd in self.ui.configitems("encode"):
269 mf = util.matcher(self.root, "", [pat], [], [])[1]
269 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 l.append((mf, cmd))
270 l.append((mf, cmd))
271 self.encodepats = l
271 self.encodepats = l
272
272
273 data = self.wopener(filename, 'r').read()
273 data = self.wopener(filename, 'r').read()
274
274
275 for mf, cmd in self.encodepats:
275 for mf, cmd in self.encodepats:
276 if mf(filename):
276 if mf(filename):
277 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
277 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 data = util.filter(data, cmd)
278 data = util.filter(data, cmd)
279 break
279 break
280
280
281 return data
281 return data
282
282
283 def wwrite(self, filename, data, fd=None):
283 def wwrite(self, filename, data, fd=None):
284 if self.decodepats == None:
284 if self.decodepats == None:
285 l = []
285 l = []
286 for pat, cmd in self.ui.configitems("decode"):
286 for pat, cmd in self.ui.configitems("decode"):
287 mf = util.matcher(self.root, "", [pat], [], [])[1]
287 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 l.append((mf, cmd))
288 l.append((mf, cmd))
289 self.decodepats = l
289 self.decodepats = l
290
290
291 for mf, cmd in self.decodepats:
291 for mf, cmd in self.decodepats:
292 if mf(filename):
292 if mf(filename):
293 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
293 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 data = util.filter(data, cmd)
294 data = util.filter(data, cmd)
295 break
295 break
296
296
297 if fd:
297 if fd:
298 return fd.write(data)
298 return fd.write(data)
299 return self.wopener(filename, 'w').write(data)
299 return self.wopener(filename, 'w').write(data)
300
300
301 def transaction(self):
301 def transaction(self):
302 tr = self.transhandle
302 tr = self.transhandle
303 if tr != None and tr.running():
303 if tr != None and tr.running():
304 return tr.nest()
304 return tr.nest()
305
305
306 # save dirstate for rollback
306 # save dirstate for rollback
307 try:
307 try:
308 ds = self.opener("dirstate").read()
308 ds = self.opener("dirstate").read()
309 except IOError:
309 except IOError:
310 ds = ""
310 ds = ""
311 self.opener("journal.dirstate", "w").write(ds)
311 self.opener("journal.dirstate", "w").write(ds)
312
312
313 tr = transaction.transaction(self.ui.warn, self.opener,
313 tr = transaction.transaction(self.ui.warn, self.opener,
314 self.join("journal"),
314 self.join("journal"),
315 aftertrans(self.path))
315 aftertrans(self.path))
316 self.transhandle = tr
316 self.transhandle = tr
317 return tr
317 return tr
318
318
319 def recover(self):
319 def recover(self):
320 l = self.lock()
320 l = self.lock()
321 if os.path.exists(self.join("journal")):
321 if os.path.exists(self.join("journal")):
322 self.ui.status(_("rolling back interrupted transaction\n"))
322 self.ui.status(_("rolling back interrupted transaction\n"))
323 transaction.rollback(self.opener, self.join("journal"))
323 transaction.rollback(self.opener, self.join("journal"))
324 self.reload()
324 self.reload()
325 return True
325 return True
326 else:
326 else:
327 self.ui.warn(_("no interrupted transaction available\n"))
327 self.ui.warn(_("no interrupted transaction available\n"))
328 return False
328 return False
329
329
    def rollback(self, wlock=None):
        # Undo the last completed transaction using the "undo" journal,
        # restoring the dirstate that was saved alongside it.  The
        # working-directory lock is taken before the repository lock.
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh in-memory state after the on-disk rollback
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
342
342
    def wreload(self):
        # re-read the dirstate (working directory state) from disk
        self.dirstate.read()
345
345
346 def reload(self):
346 def reload(self):
347 self.changelog.load()
347 self.changelog.load()
348 self.manifest.load()
348 self.manifest.load()
349 self.tagscache = None
349 self.tagscache = None
350 self.nodetagscache = None
350 self.nodetagscache = None
351
351
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        # Acquire the lock file named `lockname`.  A first non-blocking
        # attempt is made; if the lock is held and `wait` is true, retry
        # with a timeout instead of failing immediately.
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        # acquirefn lets callers refresh cached state once the lock is held
        if acquirefn:
            acquirefn()
        return l
368
368
    def lock(self, wait=1):
        # repository (store) lock; re-reads cached store state when acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
372
372
    def wlock(self, wait=1):
        # working-directory lock; writes the dirstate on release and
        # re-reads it on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
377
377
378 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
378 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
379 "determine whether a new filenode is needed"
379 "determine whether a new filenode is needed"
380 fp1 = manifest1.get(filename, nullid)
380 fp1 = manifest1.get(filename, nullid)
381 fp2 = manifest2.get(filename, nullid)
381 fp2 = manifest2.get(filename, nullid)
382
382
383 if fp2 != nullid:
383 if fp2 != nullid:
384 # is one parent an ancestor of the other?
384 # is one parent an ancestor of the other?
385 fpa = filelog.ancestor(fp1, fp2)
385 fpa = filelog.ancestor(fp1, fp2)
386 if fpa == fp1:
386 if fpa == fp1:
387 fp1, fp2 = fp2, nullid
387 fp1, fp2 = fp2, nullid
388 elif fpa == fp2:
388 elif fpa == fp2:
389 fp2 = nullid
389 fp2 = nullid
390
390
391 # is the file unmodified from the parent? report existing entry
391 # is the file unmodified from the parent? report existing entry
392 if fp2 == nullid and text == filelog.read(fp1):
392 if fp2 == nullid and text == filelog.read(fp1):
393 return (fp1, None, None)
393 return (fp1, None, None)
394
394
395 return (None, fp1, fp2)
395 return (None, fp1, fp2)
396
396
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        # Low-level commit: record the given files against the given
        # parents without computing working-directory changes.
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of the current
        # working directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file is unchanged: reuse the existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working directory: drop it from
                # the manifest being built
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
452
452
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit the given files (or all outstanding changes) and
        return the new changeset node, or None if nothing was committed
        or the commit message came back empty from the editor."""
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify each file by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) may legitimately commit with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its revision in the filelog
                # metadata for this entry
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a commit-message template and run the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
579
579
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    """Walk the files of revision `node` (or the working directory).

    Yields (source, filename) pairs: 'm' for files taken from the
    manifest of `node`, 'b' for requested-but-missing files accepted
    by `badmatch`, or whatever source the dirstate walker reports
    when no node is given.
    """
    # No revision requested: defer entirely to the dirstate walker.
    if not node:
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
        return

    # Track which explicitly requested files never appear in the
    # manifest so they can be reported below.
    fdict = dict.fromkeys(files)
    for fn in self.manifest.read(self.changelog.read(node)[0]):
        fdict.pop(fn, None)
        if match(fn):
            yield 'm', fn

    # Anything left in fdict was asked for but absent from the manifest.
    for fn in fdict:
        if badmatch and badmatch(fn):
            if match(fn):
                yield 'b', fn
        else:
            self.ui.warn(_('%s: No such file in rev %s\n') % (
                util.pathto(self.getcwd(), fn), short(node)))
597
597
def changes(self, node1=None, node2=None, files=[], match=util.always,
            wlock=None, show_ignored=None):
    """return changes between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns (modified, added, removed, deleted, unknown) lists, plus
    an ignored list when show_ignored is not None.
    """

    def fcmp(fn, mf):
        # full content comparison: working file vs manifest version
        t1 = self.wread(fn)
        t2 = self.file(fn).read(mf.get(fn, nullid))
        return cmp(t1, t2)

    def mfmatches(node):
        # manifest of `node` restricted to files accepted by `match`
        change = self.changelog.read(node)
        mf = dict(self.manifest.read(change[0]))
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if node1:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        if not wlock:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockException:
                # best effort: proceed without the lock, but then we
                # may not update the dirstate below
                wlock = None
        lookup, modified, added, removed, deleted, unknown, ignored = (
            self.dirstate.changes(files, match, show_ignored))

        # are we comparing working dir against its parent?
        if not node1:
            if lookup:
                # do a full compare of any files that might have changed
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup:
                    if fcmp(f, mf2):
                        modified.append(f)
                    elif wlock is not None:
                        # file is clean: refresh its dirstate stat info
                        self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self.dirstate.parents()[0])
            for f in lookup + modified + added:
                mf2[f] = ""
            for f in removed:
                if f in mf2:
                    del mf2[f]
    else:
        # we are comparing two revisions
        deleted, unknown, ignored = [], [], []
        mf2 = mfmatches(node2)

    if node1:
        # flush lists from dirstate before comparing manifests
        modified, added = [], []

        for fn in mf2:
            # fix: use the `in` operator instead of deprecated has_key()
            if fn in mf1:
                if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                    modified.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored:
        l.sort()
    if show_ignored is None:
        return (modified, added, removed, deleted, unknown)
    else:
        return (modified, added, removed, deleted, unknown, ignored)
680
680
def add(self, list, wlock=None):
    """Schedule the given files for addition at the next commit."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        path = self.wjoin(f)
        if not os.path.exists(path):
            self.ui.warn(_("%s does not exist!\n") % f)
            continue
        if not os.path.isfile(path):
            self.ui.warn(_("%s not added: only files supported currently\n")
                         % f)
            continue
        # 'a' = already added, 'n' = already tracked (normal)
        if self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
            continue
        self.dirstate.update([f], "a")
695
695
def forget(self, list, wlock=None):
    """Stop tracking files that are in the added/ignored states."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) in 'ai':
            self.dirstate.forget([f])
        else:
            self.ui.warn(_("%s not added!\n") % f)
704
704
705 def remove(self, list, unlink=False, wlock=None):
705 def remove(self, list, unlink=False, wlock=None):
706 if unlink:
706 if unlink:
707 for f in list:
707 for f in list:
708 try:
708 try:
709 util.unlink(self.wjoin(f))
709 util.unlink(self.wjoin(f))
710 except OSError, inst:
710 except OSError, inst:
711 if inst.errno != errno.ENOENT:
711 if inst.errno != errno.ENOENT:
712 raise
712 raise
713 if not wlock:
713 if not wlock:
714 wlock = self.wlock()
714 wlock = self.wlock()
715 for f in list:
715 for f in list:
716 p = self.wjoin(f)
716 p = self.wjoin(f)
717 if os.path.exists(p):
717 if os.path.exists(p):
718 self.ui.warn(_("%s still exists!\n") % f)
718 self.ui.warn(_("%s still exists!\n") % f)
719 elif self.dirstate.state(f) == 'a':
719 elif self.dirstate.state(f) == 'a':
720 self.dirstate.forget([f])
720 self.dirstate.forget([f])
721 elif f not in self.dirstate:
721 elif f not in self.dirstate:
722 self.ui.warn(_("%s not tracked!\n") % f)
722 self.ui.warn(_("%s not tracked!\n") % f)
723 else:
723 else:
724 self.dirstate.update([f], "r")
724 self.dirstate.update([f], "r")
725
725
def undelete(self, list, wlock=None):
    """Restore files scheduled for removal from the first parent."""
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    mf = self.manifest.readflags(mn)  # per-file exec-bit flags
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            # fix: wrap the message in _() so it is translatable,
            # consistent with every other warning in this class
            self.ui.warn(_("%s not removed!\n") % f)
        else:
            # restore content, exec bit, and dirstate entry
            t = self.file(f).read(m[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf[f])
            self.dirstate.update([f], "n")
741
741
def copy(self, source, dest, wlock=None):
    """Record in the dirstate that `dest` is a copy of `source`."""
    target = self.wjoin(dest)
    if not os.path.exists(target):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not os.path.isfile(target):
        self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
754
754
def heads(self, start=None):
    """Return the repository heads, highest revision number first."""
    # decorate-sort-undecorate on the negated revision number
    decorated = [(-self.changelog.rev(h), h)
                 for h in self.changelog.heads(start)]
    decorated.sort()
    return [h for (negrev, h) in decorated]
761
761
# branchlookup returns a dict giving a list of branches for
# each head. A branch is defined as the tag of a node or
# the branch of the node's parents. If a node has multiple
# branch tags, tags are eliminated if they are visible from other
# branch tags.
#
# So, for this graph: a->b->c->d->e
#                      \         /
#                       aa -----/
# a has tag 2.6.12
# d has tag 2.6.13
# e would have branch tags for 2.6.12 and 2.6.13. Because the node
# for 2.6.12 can be reached from the node 2.6.13, that is eliminated
# from the list.
#
# It is possible that more than one head will have the same branch tag.
# callers need to check the result for multiple heads under the same
# branch tag if that is a problem for them (ie checkout of a specific
# branch).
#
# passing in a specific branch will limit the depth of the search
# through the parents. It won't limit the branches returned in the
# result though.
def branchlookup(self, heads=None, branch=None):
    if not heads:
        heads = self.heads()
    headt = list(heads)
    chlog = self.changelog
    branches = {}
    merges = []
    seenmerge = {}

    # Phase 1: traverse the tree once for each head (and once for the
    # second parent of each merge met on the way), recording in the
    # `branches` dict which tagged nodes are visible from where.
    while headt or merges:
        if merges:
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                for x in tags:
                    if x == 'tip':
                        continue
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    # reached the requested branch: stop descending here
                    continue
            seen[n] = 1
            if pp[1] != nullid and n not in seenmerge:
                # schedule the merge's second parent for a later pass
                merges.append((pp[1], list(found)))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])

    # Phase 2: for each head, eliminate branch tags that are visible
    # from another branch tag of that same head.
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # memoized set of tagged nodes reachable from `node`
            if node in viscache:
                return viscache[node]
            ret = {}
            visit = [node]
            while visit:
                x = visit.pop()
                if x in viscache:
                    ret.update(viscache[x])
                elif x not in ret:
                    ret[x] = 1
                    if x in branches:
                        visit.extend(branches[x].keys())
            viscache[node] = ret
            return ret
        if h not in branches:
            continue
        # O(n^2), but somewhat limited. This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h].keys():
                if b != bb and b in visible(bb):
                    vis = True
                    break
            if not vis:
                l = out.setdefault(h, [])
                l.extend(self.nodetags(b))
    return out
867
867
def branches(self, nodes):
    """For each node, return the (head, root, parent1, parent2) tuple
    describing the linear branch segment it belongs to."""
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tip in nodes:
        n = tip
        while True:
            p = self.changelog.parents(n)
            # a segment ends at a merge (two parents) or at the root
            if p[1] != nullid or p[0] == nullid:
                result.append((tip, n, p[0], p[1]))
                break
            n = p[0]
    return result
881
881
def between(self, pairs):
    """For each (top, bottom) pair, return the nodes met while walking
    first parents from top toward bottom, sampled at exponentially
    growing distances (1, 2, 4, ...)."""
    results = []
    for top, bottom in pairs:
        sampled = []
        node, step, target = top, 0, 1
        while node != bottom:
            if step == target:
                sampled.append(node)
                target *= 2
            node = self.changelog.parents(node)[0]
            step += 1
        results.append(sampled)
    return results
900
900
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # fix: identity comparison with None (was '== None')
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything remote has is missing here
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                # queue unknown parents for the next batched request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # ask the remote in batches of 10 to bound request size
            for p in range(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.note(_("found new changesets starting at ") +
                 " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1041
1041
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # fix: identity comparison with None (was '== None')
    if base is None:
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1089
1089
def pull(self, remote, heads=None, force=False):
    """Pull missing changesets from `remote` into this repository.

    Returns 0 when there is nothing to pull, otherwise the result of
    addchangegroup().
    """
    mylock = self.lock()  # held (by reference) until the method returns

    fetch = self.findincoming(remote, force=force)
    if fetch == [nullid]:
        self.ui.status(_("requesting all changes\n"))
    if not fetch:
        self.ui.status(_("no changes found\n"))
        return 0

    if heads is None:
        cg = remote.changegroup(fetch, 'pull')
    else:
        cg = remote.changegroupsubset(fetch, heads, 'pull')
    return self.addchangegroup(cg, 'pull')
1106
1106
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets (optionally limited to `revs`) to
    `remote`; returns 1 when nothing is pushed or the push is refused,
    otherwise the remote's addchangegroup() result."""
    # fix: the local was named `lock`, shadowing the demandloaded
    # `lock` module imported at the top of the file
    plock = remote.lock()

    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)
    if not force and inc:
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync?"
                         " use push -f to force)\n"))
        return 1

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return 1
    elif not force:
        # FIXME we don't properly detect creation of new heads
        # in the push -r case, assume the user knows what he's doing
        if not revs and len(remote_heads) < len(heads) \
           and remote_heads != [nullid]:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return 1

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return remote.addchangegroup(cg, 'push')
1143
1143
1144 def changegroupsubset(self, bases, heads, source):
1144 def changegroupsubset(self, bases, heads, source):
1145 """This function generates a changegroup consisting of all the nodes
1145 """This function generates a changegroup consisting of all the nodes
1146 that are descendents of any of the bases, and ancestors of any of
1146 that are descendents of any of the bases, and ancestors of any of
1147 the heads.
1147 the heads.
1148
1148
1149 It is fairly complex as determining which filenodes and which
1149 It is fairly complex as determining which filenodes and which
1150 manifest nodes need to be included for the changeset to be complete
1150 manifest nodes need to be included for the changeset to be complete
1151 is non-trivial.
1151 is non-trivial.
1152
1152
1153 Another wrinkle is doing the reverse, figuring out which changeset in
1153 Another wrinkle is doing the reverse, figuring out which changeset in
1154 the changegroup a particular filenode or manifestnode belongs to."""
1154 the changegroup a particular filenode or manifestnode belongs to."""
1155
1155
1156 self.hook('preoutgoing', throw=True, source=source)
1156 self.hook('preoutgoing', throw=True, source=source)
1157
1157
1158 # Set up some initial variables
1158 # Set up some initial variables
1159 # Make it easy to refer to self.changelog
1159 # Make it easy to refer to self.changelog
1160 cl = self.changelog
1160 cl = self.changelog
1161 # msng is short for missing - compute the list of changesets in this
1161 # msng is short for missing - compute the list of changesets in this
1162 # changegroup.
1162 # changegroup.
1163 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1163 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1164 # Some bases may turn out to be superfluous, and some heads may be
1164 # Some bases may turn out to be superfluous, and some heads may be
1165 # too. nodesbetween will return the minimal set of bases and heads
1165 # too. nodesbetween will return the minimal set of bases and heads
1166 # necessary to re-create the changegroup.
1166 # necessary to re-create the changegroup.
1167
1167
1168 # Known heads are the list of heads that it is assumed the recipient
1168 # Known heads are the list of heads that it is assumed the recipient
1169 # of this changegroup will know about.
1169 # of this changegroup will know about.
1170 knownheads = {}
1170 knownheads = {}
1171 # We assume that all parents of bases are known heads.
1171 # We assume that all parents of bases are known heads.
1172 for n in bases:
1172 for n in bases:
1173 for p in cl.parents(n):
1173 for p in cl.parents(n):
1174 if p != nullid:
1174 if p != nullid:
1175 knownheads[p] = 1
1175 knownheads[p] = 1
1176 knownheads = knownheads.keys()
1176 knownheads = knownheads.keys()
1177 if knownheads:
1177 if knownheads:
1178 # Now that we know what heads are known, we can compute which
1178 # Now that we know what heads are known, we can compute which
1179 # changesets are known. The recipient must know about all
1179 # changesets are known. The recipient must know about all
1180 # changesets required to reach the known heads from the null
1180 # changesets required to reach the known heads from the null
1181 # changeset.
1181 # changeset.
1182 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1182 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1183 junk = None
1183 junk = None
1184 # Transform the list into an ersatz set.
1184 # Transform the list into an ersatz set.
1185 has_cl_set = dict.fromkeys(has_cl_set)
1185 has_cl_set = dict.fromkeys(has_cl_set)
1186 else:
1186 else:
1187 # If there were no known heads, the recipient cannot be assumed to
1187 # If there were no known heads, the recipient cannot be assumed to
1188 # know about any changesets.
1188 # know about any changesets.
1189 has_cl_set = {}
1189 has_cl_set = {}
1190
1190
1191 # Make it easy to refer to self.manifest
1191 # Make it easy to refer to self.manifest
1192 mnfst = self.manifest
1192 mnfst = self.manifest
1193 # We don't know which manifests are missing yet
1193 # We don't know which manifests are missing yet
1194 msng_mnfst_set = {}
1194 msng_mnfst_set = {}
1195 # Nor do we know which filenodes are missing.
1195 # Nor do we know which filenodes are missing.
1196 msng_filenode_set = {}
1196 msng_filenode_set = {}
1197
1197
1198 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1198 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1199 junk = None
1199 junk = None
1200
1200
1201 # A changeset always belongs to itself, so the changenode lookup
1201 # A changeset always belongs to itself, so the changenode lookup
1202 # function for a changenode is identity.
1202 # function for a changenode is identity.
1203 def identity(x):
1203 def identity(x):
1204 return x
1204 return x
1205
1205
1206 # A function generating function. Sets up an environment for the
1206 # A function generating function. Sets up an environment for the
1207 # inner function.
1207 # inner function.
1208 def cmp_by_rev_func(revlog):
1208 def cmp_by_rev_func(revlog):
1209 # Compare two nodes by their revision number in the environment's
1209 # Compare two nodes by their revision number in the environment's
1210 # revision history. Since the revision number both represents the
1210 # revision history. Since the revision number both represents the
1211 # most efficient order to read the nodes in, and represents a
1211 # most efficient order to read the nodes in, and represents a
1212 # topological sorting of the nodes, this function is often useful.
1212 # topological sorting of the nodes, this function is often useful.
1213 def cmp_by_rev(a, b):
1213 def cmp_by_rev(a, b):
1214 return cmp(revlog.rev(a), revlog.rev(b))
1214 return cmp(revlog.rev(a), revlog.rev(b))
1215 return cmp_by_rev
1215 return cmp_by_rev
1216
1216
1217 # If we determine that a particular file or manifest node must be a
1217 # If we determine that a particular file or manifest node must be a
1218 # node that the recipient of the changegroup will already have, we can
1218 # node that the recipient of the changegroup will already have, we can
1219 # also assume the recipient will have all the parents. This function
1219 # also assume the recipient will have all the parents. This function
1220 # prunes them from the set of missing nodes.
1220 # prunes them from the set of missing nodes.
1221 def prune_parents(revlog, hasset, msngset):
1221 def prune_parents(revlog, hasset, msngset):
1222 haslst = hasset.keys()
1222 haslst = hasset.keys()
1223 haslst.sort(cmp_by_rev_func(revlog))
1223 haslst.sort(cmp_by_rev_func(revlog))
1224 for node in haslst:
1224 for node in haslst:
1225 parentlst = [p for p in revlog.parents(node) if p != nullid]
1225 parentlst = [p for p in revlog.parents(node) if p != nullid]
1226 while parentlst:
1226 while parentlst:
1227 n = parentlst.pop()
1227 n = parentlst.pop()
1228 if n not in hasset:
1228 if n not in hasset:
1229 hasset[n] = 1
1229 hasset[n] = 1
1230 p = [p for p in revlog.parents(n) if p != nullid]
1230 p = [p for p in revlog.parents(n) if p != nullid]
1231 parentlst.extend(p)
1231 parentlst.extend(p)
1232 for n in hasset:
1232 for n in hasset:
1233 msngset.pop(n, None)
1233 msngset.pop(n, None)
1234
1234
1235 # This is a function generating function used to set up an environment
1235 # This is a function generating function used to set up an environment
1236 # for the inner function to execute in.
1236 # for the inner function to execute in.
1237 def manifest_and_file_collector(changedfileset):
1237 def manifest_and_file_collector(changedfileset):
1238 # This is an information gathering function that gathers
1238 # This is an information gathering function that gathers
1239 # information from each changeset node that goes out as part of
1239 # information from each changeset node that goes out as part of
1240 # the changegroup. The information gathered is a list of which
1240 # the changegroup. The information gathered is a list of which
1241 # manifest nodes are potentially required (the recipient may
1241 # manifest nodes are potentially required (the recipient may
1242 # already have them) and total list of all files which were
1242 # already have them) and total list of all files which were
1243 # changed in any changeset in the changegroup.
1243 # changed in any changeset in the changegroup.
1244 #
1244 #
1245 # We also remember the first changenode we saw any manifest
1245 # We also remember the first changenode we saw any manifest
1246 # referenced by so we can later determine which changenode 'owns'
1246 # referenced by so we can later determine which changenode 'owns'
1247 # the manifest.
1247 # the manifest.
1248 def collect_manifests_and_files(clnode):
1248 def collect_manifests_and_files(clnode):
1249 c = cl.read(clnode)
1249 c = cl.read(clnode)
1250 for f in c[3]:
1250 for f in c[3]:
1251 # This is to make sure we only have one instance of each
1251 # This is to make sure we only have one instance of each
1252 # filename string for each filename.
1252 # filename string for each filename.
1253 changedfileset.setdefault(f, f)
1253 changedfileset.setdefault(f, f)
1254 msng_mnfst_set.setdefault(c[0], clnode)
1254 msng_mnfst_set.setdefault(c[0], clnode)
1255 return collect_manifests_and_files
1255 return collect_manifests_and_files
1256
1256
1257 # Figure out which manifest nodes (of the ones we think might be part
1257 # Figure out which manifest nodes (of the ones we think might be part
1258 # of the changegroup) the recipient must know about and remove them
1258 # of the changegroup) the recipient must know about and remove them
1259 # from the changegroup.
1259 # from the changegroup.
1260 def prune_manifests():
1260 def prune_manifests():
1261 has_mnfst_set = {}
1261 has_mnfst_set = {}
1262 for n in msng_mnfst_set:
1262 for n in msng_mnfst_set:
1263 # If a 'missing' manifest thinks it belongs to a changenode
1263 # If a 'missing' manifest thinks it belongs to a changenode
1264 # the recipient is assumed to have, obviously the recipient
1264 # the recipient is assumed to have, obviously the recipient
1265 # must have that manifest.
1265 # must have that manifest.
1266 linknode = cl.node(mnfst.linkrev(n))
1266 linknode = cl.node(mnfst.linkrev(n))
1267 if linknode in has_cl_set:
1267 if linknode in has_cl_set:
1268 has_mnfst_set[n] = 1
1268 has_mnfst_set[n] = 1
1269 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1269 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1270
1270
1271 # Use the information collected in collect_manifests_and_files to say
1271 # Use the information collected in collect_manifests_and_files to say
1272 # which changenode any manifestnode belongs to.
1272 # which changenode any manifestnode belongs to.
1273 def lookup_manifest_link(mnfstnode):
1273 def lookup_manifest_link(mnfstnode):
1274 return msng_mnfst_set[mnfstnode]
1274 return msng_mnfst_set[mnfstnode]
1275
1275
1276 # A function generating function that sets up the initial environment
1276 # A function generating function that sets up the initial environment
1277 # the inner function.
1277 # the inner function.
1278 def filenode_collector(changedfiles):
1278 def filenode_collector(changedfiles):
1279 next_rev = [0]
1279 next_rev = [0]
1280 # This gathers information from each manifestnode included in the
1280 # This gathers information from each manifestnode included in the
1281 # changegroup about which filenodes the manifest node references
1281 # changegroup about which filenodes the manifest node references
1282 # so we can include those in the changegroup too.
1282 # so we can include those in the changegroup too.
1283 #
1283 #
1284 # It also remembers which changenode each filenode belongs to. It
1284 # It also remembers which changenode each filenode belongs to. It
1285 # does this by assuming the a filenode belongs to the changenode
1285 # does this by assuming the a filenode belongs to the changenode
1286 # the first manifest that references it belongs to.
1286 # the first manifest that references it belongs to.
1287 def collect_msng_filenodes(mnfstnode):
1287 def collect_msng_filenodes(mnfstnode):
1288 r = mnfst.rev(mnfstnode)
1288 r = mnfst.rev(mnfstnode)
1289 if r == next_rev[0]:
1289 if r == next_rev[0]:
1290 # If the last rev we looked at was the one just previous,
1290 # If the last rev we looked at was the one just previous,
1291 # we only need to see a diff.
1291 # we only need to see a diff.
1292 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1292 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1293 # For each line in the delta
1293 # For each line in the delta
1294 for dline in delta.splitlines():
1294 for dline in delta.splitlines():
1295 # get the filename and filenode for that line
1295 # get the filename and filenode for that line
1296 f, fnode = dline.split('\0')
1296 f, fnode = dline.split('\0')
1297 fnode = bin(fnode[:40])
1297 fnode = bin(fnode[:40])
1298 f = changedfiles.get(f, None)
1298 f = changedfiles.get(f, None)
1299 # And if the file is in the list of files we care
1299 # And if the file is in the list of files we care
1300 # about.
1300 # about.
1301 if f is not None:
1301 if f is not None:
1302 # Get the changenode this manifest belongs to
1302 # Get the changenode this manifest belongs to
1303 clnode = msng_mnfst_set[mnfstnode]
1303 clnode = msng_mnfst_set[mnfstnode]
1304 # Create the set of filenodes for the file if
1304 # Create the set of filenodes for the file if
1305 # there isn't one already.
1305 # there isn't one already.
1306 ndset = msng_filenode_set.setdefault(f, {})
1306 ndset = msng_filenode_set.setdefault(f, {})
1307 # And set the filenode's changelog node to the
1307 # And set the filenode's changelog node to the
1308 # manifest's if it hasn't been set already.
1308 # manifest's if it hasn't been set already.
1309 ndset.setdefault(fnode, clnode)
1309 ndset.setdefault(fnode, clnode)
1310 else:
1310 else:
1311 # Otherwise we need a full manifest.
1311 # Otherwise we need a full manifest.
1312 m = mnfst.read(mnfstnode)
1312 m = mnfst.read(mnfstnode)
1313 # For every file in we care about.
1313 # For every file in we care about.
1314 for f in changedfiles:
1314 for f in changedfiles:
1315 fnode = m.get(f, None)
1315 fnode = m.get(f, None)
1316 # If it's in the manifest
1316 # If it's in the manifest
1317 if fnode is not None:
1317 if fnode is not None:
1318 # See comments above.
1318 # See comments above.
1319 clnode = msng_mnfst_set[mnfstnode]
1319 clnode = msng_mnfst_set[mnfstnode]
1320 ndset = msng_filenode_set.setdefault(f, {})
1320 ndset = msng_filenode_set.setdefault(f, {})
1321 ndset.setdefault(fnode, clnode)
1321 ndset.setdefault(fnode, clnode)
1322 # Remember the revision we hope to see next.
1322 # Remember the revision we hope to see next.
1323 next_rev[0] = r + 1
1323 next_rev[0] = r + 1
1324 return collect_msng_filenodes
1324 return collect_msng_filenodes
1325
1325
1326 # We have a list of filenodes we think we need for a file, lets remove
1326 # We have a list of filenodes we think we need for a file, lets remove
1327 # all those we now the recipient must have.
1327 # all those we now the recipient must have.
1328 def prune_filenodes(f, filerevlog):
1328 def prune_filenodes(f, filerevlog):
1329 msngset = msng_filenode_set[f]
1329 msngset = msng_filenode_set[f]
1330 hasset = {}
1330 hasset = {}
1331 # If a 'missing' filenode thinks it belongs to a changenode we
1331 # If a 'missing' filenode thinks it belongs to a changenode we
1332 # assume the recipient must have, then the recipient must have
1332 # assume the recipient must have, then the recipient must have
1333 # that filenode.
1333 # that filenode.
1334 for n in msngset:
1334 for n in msngset:
1335 clnode = cl.node(filerevlog.linkrev(n))
1335 clnode = cl.node(filerevlog.linkrev(n))
1336 if clnode in has_cl_set:
1336 if clnode in has_cl_set:
1337 hasset[n] = 1
1337 hasset[n] = 1
1338 prune_parents(filerevlog, hasset, msngset)
1338 prune_parents(filerevlog, hasset, msngset)
1339
1339
1340 # A function generator function that sets up the a context for the
1340 # A function generator function that sets up the a context for the
1341 # inner function.
1341 # inner function.
1342 def lookup_filenode_link_func(fname):
1342 def lookup_filenode_link_func(fname):
1343 msngset = msng_filenode_set[fname]
1343 msngset = msng_filenode_set[fname]
1344 # Lookup the changenode the filenode belongs to.
1344 # Lookup the changenode the filenode belongs to.
1345 def lookup_filenode_link(fnode):
1345 def lookup_filenode_link(fnode):
1346 return msngset[fnode]
1346 return msngset[fnode]
1347 return lookup_filenode_link
1347 return lookup_filenode_link
1348
1348
1349 # Now that we have all theses utility functions to help out and
1349 # Now that we have all theses utility functions to help out and
1350 # logically divide up the task, generate the group.
1350 # logically divide up the task, generate the group.
1351 def gengroup():
1351 def gengroup():
1352 # The set of changed files starts empty.
1352 # The set of changed files starts empty.
1353 changedfiles = {}
1353 changedfiles = {}
1354 # Create a changenode group generator that will call our functions
1354 # Create a changenode group generator that will call our functions
1355 # back to lookup the owning changenode and collect information.
1355 # back to lookup the owning changenode and collect information.
1356 group = cl.group(msng_cl_lst, identity,
1356 group = cl.group(msng_cl_lst, identity,
1357 manifest_and_file_collector(changedfiles))
1357 manifest_and_file_collector(changedfiles))
1358 for chnk in group:
1358 for chnk in group:
1359 yield chnk
1359 yield chnk
1360
1360
1361 # The list of manifests has been collected by the generator
1361 # The list of manifests has been collected by the generator
1362 # calling our functions back.
1362 # calling our functions back.
1363 prune_manifests()
1363 prune_manifests()
1364 msng_mnfst_lst = msng_mnfst_set.keys()
1364 msng_mnfst_lst = msng_mnfst_set.keys()
1365 # Sort the manifestnodes by revision number.
1365 # Sort the manifestnodes by revision number.
1366 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1366 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1367 # Create a generator for the manifestnodes that calls our lookup
1367 # Create a generator for the manifestnodes that calls our lookup
1368 # and data collection functions back.
1368 # and data collection functions back.
1369 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1369 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1370 filenode_collector(changedfiles))
1370 filenode_collector(changedfiles))
1371 for chnk in group:
1371 for chnk in group:
1372 yield chnk
1372 yield chnk
1373
1373
1374 # These are no longer needed, dereference and toss the memory for
1374 # These are no longer needed, dereference and toss the memory for
1375 # them.
1375 # them.
1376 msng_mnfst_lst = None
1376 msng_mnfst_lst = None
1377 msng_mnfst_set.clear()
1377 msng_mnfst_set.clear()
1378
1378
1379 changedfiles = changedfiles.keys()
1379 changedfiles = changedfiles.keys()
1380 changedfiles.sort()
1380 changedfiles.sort()
1381 # Go through all our files in order sorted by name.
1381 # Go through all our files in order sorted by name.
1382 for fname in changedfiles:
1382 for fname in changedfiles:
1383 filerevlog = self.file(fname)
1383 filerevlog = self.file(fname)
1384 # Toss out the filenodes that the recipient isn't really
1384 # Toss out the filenodes that the recipient isn't really
1385 # missing.
1385 # missing.
1386 if msng_filenode_set.has_key(fname):
1386 if msng_filenode_set.has_key(fname):
1387 prune_filenodes(fname, filerevlog)
1387 prune_filenodes(fname, filerevlog)
1388 msng_filenode_lst = msng_filenode_set[fname].keys()
1388 msng_filenode_lst = msng_filenode_set[fname].keys()
1389 else:
1389 else:
1390 msng_filenode_lst = []
1390 msng_filenode_lst = []
1391 # If any filenodes are left, generate the group for them,
1391 # If any filenodes are left, generate the group for them,
1392 # otherwise don't bother.
1392 # otherwise don't bother.
1393 if len(msng_filenode_lst) > 0:
1393 if len(msng_filenode_lst) > 0:
1394 yield changegroup.genchunk(fname)
1394 yield changegroup.genchunk(fname)
1395 # Sort the filenodes by their revision #
1395 # Sort the filenodes by their revision #
1396 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1396 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1397 # Create a group generator and only pass in a changenode
1397 # Create a group generator and only pass in a changenode
1398 # lookup function as we need to collect no information
1398 # lookup function as we need to collect no information
1399 # from filenodes.
1399 # from filenodes.
1400 group = filerevlog.group(msng_filenode_lst,
1400 group = filerevlog.group(msng_filenode_lst,
1401 lookup_filenode_link_func(fname))
1401 lookup_filenode_link_func(fname))
1402 for chnk in group:
1402 for chnk in group:
1403 yield chnk
1403 yield chnk
1404 if msng_filenode_set.has_key(fname):
1404 if msng_filenode_set.has_key(fname):
1405 # Don't need this anymore, toss it to free memory.
1405 # Don't need this anymore, toss it to free memory.
1406 del msng_filenode_set[fname]
1406 del msng_filenode_set[fname]
1407 # Signal that no more groups are left.
1407 # Signal that no more groups are left.
1408 yield changegroup.closechunk()
1408 yield changegroup.closechunk()
1409
1409
1410 if msng_cl_lst:
1410 if msng_cl_lst:
1411 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1411 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1412
1412
1413 return util.chunkbuffer(gengroup())
1413 return util.chunkbuffer(gengroup())
1414
1414
1415 def changegroup(self, basenodes, source):
1415 def changegroup(self, basenodes, source):
1416 """Generate a changegroup of all nodes that we have that a recipient
1416 """Generate a changegroup of all nodes that we have that a recipient
1417 doesn't.
1417 doesn't.
1418
1418
1419 This is much easier than the previous function as we can assume that
1419 This is much easier than the previous function as we can assume that
1420 the recipient has any changenode we aren't sending them."""
1420 the recipient has any changenode we aren't sending them."""
1421
1421
1422 self.hook('preoutgoing', throw=True, source=source)
1422 self.hook('preoutgoing', throw=True, source=source)
1423
1423
1424 cl = self.changelog
1424 cl = self.changelog
1425 nodes = cl.nodesbetween(basenodes, None)[0]
1425 nodes = cl.nodesbetween(basenodes, None)[0]
1426 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1426 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1427
1427
1428 def identity(x):
1428 def identity(x):
1429 return x
1429 return x
1430
1430
1431 def gennodelst(revlog):
1431 def gennodelst(revlog):
1432 for r in xrange(0, revlog.count()):
1432 for r in xrange(0, revlog.count()):
1433 n = revlog.node(r)
1433 n = revlog.node(r)
1434 if revlog.linkrev(n) in revset:
1434 if revlog.linkrev(n) in revset:
1435 yield n
1435 yield n
1436
1436
1437 def changed_file_collector(changedfileset):
1437 def changed_file_collector(changedfileset):
1438 def collect_changed_files(clnode):
1438 def collect_changed_files(clnode):
1439 c = cl.read(clnode)
1439 c = cl.read(clnode)
1440 for fname in c[3]:
1440 for fname in c[3]:
1441 changedfileset[fname] = 1
1441 changedfileset[fname] = 1
1442 return collect_changed_files
1442 return collect_changed_files
1443
1443
1444 def lookuprevlink_func(revlog):
1444 def lookuprevlink_func(revlog):
1445 def lookuprevlink(n):
1445 def lookuprevlink(n):
1446 return cl.node(revlog.linkrev(n))
1446 return cl.node(revlog.linkrev(n))
1447 return lookuprevlink
1447 return lookuprevlink
1448
1448
1449 def gengroup():
1449 def gengroup():
1450 # construct a list of all changed files
1450 # construct a list of all changed files
1451 changedfiles = {}
1451 changedfiles = {}
1452
1452
1453 for chnk in cl.group(nodes, identity,
1453 for chnk in cl.group(nodes, identity,
1454 changed_file_collector(changedfiles)):
1454 changed_file_collector(changedfiles)):
1455 yield chnk
1455 yield chnk
1456 changedfiles = changedfiles.keys()
1456 changedfiles = changedfiles.keys()
1457 changedfiles.sort()
1457 changedfiles.sort()
1458
1458
1459 mnfst = self.manifest
1459 mnfst = self.manifest
1460 nodeiter = gennodelst(mnfst)
1460 nodeiter = gennodelst(mnfst)
1461 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1461 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1462 yield chnk
1462 yield chnk
1463
1463
1464 for fname in changedfiles:
1464 for fname in changedfiles:
1465 filerevlog = self.file(fname)
1465 filerevlog = self.file(fname)
1466 nodeiter = gennodelst(filerevlog)
1466 nodeiter = gennodelst(filerevlog)
1467 nodeiter = list(nodeiter)
1467 nodeiter = list(nodeiter)
1468 if nodeiter:
1468 if nodeiter:
1469 yield changegroup.genchunk(fname)
1469 yield changegroup.genchunk(fname)
1470 lookup = lookuprevlink_func(filerevlog)
1470 lookup = lookuprevlink_func(filerevlog)
1471 for chnk in filerevlog.group(nodeiter, lookup):
1471 for chnk in filerevlog.group(nodeiter, lookup):
1472 yield chnk
1472 yield chnk
1473
1473
1474 yield changegroup.closechunk()
1474 yield changegroup.closechunk()
1475
1475
1476 if nodes:
1476 if nodes:
1477 self.hook('outgoing', node=hex(nodes[0]), source=source)
1477 self.hook('outgoing', node=hex(nodes[0]), source=source)
1478
1478
1479 return util.chunkbuffer(gengroup())
1479 return util.chunkbuffer(gengroup())
1480
1480
1481 def addchangegroup(self, source, srctype):
1481 def addchangegroup(self, source, srctype):
1482 """add changegroup to repo.
1482 """add changegroup to repo.
1483 returns number of heads modified or added + 1."""
1483 returns number of heads modified or added + 1."""
1484
1484
1485 def csmap(x):
1485 def csmap(x):
1486 self.ui.debug(_("add changeset %s\n") % short(x))
1486 self.ui.debug(_("add changeset %s\n") % short(x))
1487 return cl.count()
1487 return cl.count()
1488
1488
1489 def revmap(x):
1489 def revmap(x):
1490 return cl.rev(x)
1490 return cl.rev(x)
1491
1491
1492 if not source:
1492 if not source:
1493 return 0
1493 return 0
1494
1494
1495 self.hook('prechangegroup', throw=True, source=srctype)
1495 self.hook('prechangegroup', throw=True, source=srctype)
1496
1496
1497 changesets = files = revisions = 0
1497 changesets = files = revisions = 0
1498
1498
1499 tr = self.transaction()
1499 tr = self.transaction()
1500
1500
1501 # write changelog and manifest data to temp files so
1501 # write changelog data to temp files so concurrent readers will not see
1502 # concurrent readers will not see inconsistent view
1502 # inconsistent view
1503 cl = None
1503 cl = None
1504 try:
1504 try:
1505 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1505 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1506
1506
1507 oldheads = len(cl.heads())
1507 oldheads = len(cl.heads())
1508
1508
1509 # pull off the changeset group
1509 # pull off the changeset group
1510 self.ui.status(_("adding changesets\n"))
1510 self.ui.status(_("adding changesets\n"))
1511 cor = cl.count() - 1
1511 cor = cl.count() - 1
1512 chunkiter = changegroup.chunkiter(source)
1512 chunkiter = changegroup.chunkiter(source)
1513 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1513 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1514 raise util.Abort(_("received changelog group is empty"))
1514 raise util.Abort(_("received changelog group is empty"))
1515 cnr = cl.count() - 1
1515 cnr = cl.count() - 1
1516 changesets = cnr - cor
1516 changesets = cnr - cor
1517
1517
1518 mf = None
1518 # pull off the manifest group
1519 try:
1519 self.ui.status(_("adding manifests\n"))
1520 mf = appendfile.appendmanifest(self.opener,
1520 chunkiter = changegroup.chunkiter(source)
1521 self.manifest.version)
1521 # no need to check for empty manifest group here:
1522
1522 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1523 # pull off the manifest group
1523 # no new manifest will be created and the manifest group will
1524 self.ui.status(_("adding manifests\n"))
1524 # be empty during the pull
1525 chunkiter = changegroup.chunkiter(source)
1525 self.manifest.addgroup(chunkiter, revmap, tr)
1526 # no need to check for empty manifest group here:
1527 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1528 # no new manifest will be created and the manifest group will
1529 # be empty during the pull
1530 mf.addgroup(chunkiter, revmap, tr)
1531
1526
1532 # process the files
1527 # process the files
1533 self.ui.status(_("adding file changes\n"))
1528 self.ui.status(_("adding file changes\n"))
1534 while 1:
1529 while 1:
1535 f = changegroup.getchunk(source)
1530 f = changegroup.getchunk(source)
1536 if not f:
1531 if not f:
1537 break
1532 break
1538 self.ui.debug(_("adding %s revisions\n") % f)
1533 self.ui.debug(_("adding %s revisions\n") % f)
1539 fl = self.file(f)
1534 fl = self.file(f)
1540 o = fl.count()
1535 o = fl.count()
1541 chunkiter = changegroup.chunkiter(source)
1536 chunkiter = changegroup.chunkiter(source)
1542 if fl.addgroup(chunkiter, revmap, tr) is None:
1537 if fl.addgroup(chunkiter, revmap, tr) is None:
1543 raise util.Abort(_("received file revlog group is empty"))
1538 raise util.Abort(_("received file revlog group is empty"))
1544 revisions += fl.count() - o
1539 revisions += fl.count() - o
1545 files += 1
1540 files += 1
1546
1541
1547 # write order here is important so concurrent readers will see
1548 # consistent view of repo
1549 mf.writedata()
1550 finally:
1551 if mf:
1552 mf.cleanup()
1553 cl.writedata()
1542 cl.writedata()
1554 finally:
1543 finally:
1555 if cl:
1544 if cl:
1556 cl.cleanup()
1545 cl.cleanup()
1557
1546
1558 # make changelog and manifest see real files again
1547 # make changelog see real files again
1559 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1548 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1560 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1561 self.changelog.checkinlinesize(tr)
1549 self.changelog.checkinlinesize(tr)
1562 self.manifest.checkinlinesize(tr)
1563
1550
1564 newheads = len(self.changelog.heads())
1551 newheads = len(self.changelog.heads())
1565 heads = ""
1552 heads = ""
1566 if oldheads and newheads > oldheads:
1553 if oldheads and newheads > oldheads:
1567 heads = _(" (+%d heads)") % (newheads - oldheads)
1554 heads = _(" (+%d heads)") % (newheads - oldheads)
1568
1555
1569 self.ui.status(_("added %d changesets"
1556 self.ui.status(_("added %d changesets"
1570 " with %d changes to %d files%s\n")
1557 " with %d changes to %d files%s\n")
1571 % (changesets, revisions, files, heads))
1558 % (changesets, revisions, files, heads))
1572
1559
1573 if changesets > 0:
1560 if changesets > 0:
1574 self.hook('pretxnchangegroup', throw=True,
1561 self.hook('pretxnchangegroup', throw=True,
1575 node=hex(self.changelog.node(cor+1)), source=srctype)
1562 node=hex(self.changelog.node(cor+1)), source=srctype)
1576
1563
1577 tr.close()
1564 tr.close()
1578
1565
1579 if changesets > 0:
1566 if changesets > 0:
1580 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1567 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1581 source=srctype)
1568 source=srctype)
1582
1569
1583 for i in range(cor + 1, cnr + 1):
1570 for i in range(cor + 1, cnr + 1):
1584 self.hook("incoming", node=hex(self.changelog.node(i)),
1571 self.hook("incoming", node=hex(self.changelog.node(i)),
1585 source=srctype)
1572 source=srctype)
1586
1573
1587 return newheads - oldheads + 1
1574 return newheads - oldheads + 1
1588
1575
1589 def update(self, node, allow=False, force=False, choose=None,
1576 def update(self, node, allow=False, force=False, choose=None,
1590 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1577 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1591 pl = self.dirstate.parents()
1578 pl = self.dirstate.parents()
1592 if not force and pl[1] != nullid:
1579 if not force and pl[1] != nullid:
1593 raise util.Abort(_("outstanding uncommitted merges"))
1580 raise util.Abort(_("outstanding uncommitted merges"))
1594
1581
1595 err = False
1582 err = False
1596
1583
1597 p1, p2 = pl[0], node
1584 p1, p2 = pl[0], node
1598 pa = self.changelog.ancestor(p1, p2)
1585 pa = self.changelog.ancestor(p1, p2)
1599 m1n = self.changelog.read(p1)[0]
1586 m1n = self.changelog.read(p1)[0]
1600 m2n = self.changelog.read(p2)[0]
1587 m2n = self.changelog.read(p2)[0]
1601 man = self.manifest.ancestor(m1n, m2n)
1588 man = self.manifest.ancestor(m1n, m2n)
1602 m1 = self.manifest.read(m1n)
1589 m1 = self.manifest.read(m1n)
1603 mf1 = self.manifest.readflags(m1n)
1590 mf1 = self.manifest.readflags(m1n)
1604 m2 = self.manifest.read(m2n).copy()
1591 m2 = self.manifest.read(m2n).copy()
1605 mf2 = self.manifest.readflags(m2n)
1592 mf2 = self.manifest.readflags(m2n)
1606 ma = self.manifest.read(man)
1593 ma = self.manifest.read(man)
1607 mfa = self.manifest.readflags(man)
1594 mfa = self.manifest.readflags(man)
1608
1595
1609 modified, added, removed, deleted, unknown = self.changes()
1596 modified, added, removed, deleted, unknown = self.changes()
1610
1597
1611 # is this a jump, or a merge? i.e. is there a linear path
1598 # is this a jump, or a merge? i.e. is there a linear path
1612 # from p1 to p2?
1599 # from p1 to p2?
1613 linear_path = (pa == p1 or pa == p2)
1600 linear_path = (pa == p1 or pa == p2)
1614
1601
1615 if allow and linear_path:
1602 if allow and linear_path:
1616 raise util.Abort(_("there is nothing to merge, "
1603 raise util.Abort(_("there is nothing to merge, "
1617 "just use 'hg update'"))
1604 "just use 'hg update'"))
1618 if allow and not forcemerge:
1605 if allow and not forcemerge:
1619 if modified or added or removed:
1606 if modified or added or removed:
1620 raise util.Abort(_("outstanding uncommitted changes"))
1607 raise util.Abort(_("outstanding uncommitted changes"))
1621
1608
1622 if not forcemerge and not force:
1609 if not forcemerge and not force:
1623 for f in unknown:
1610 for f in unknown:
1624 if f in m2:
1611 if f in m2:
1625 t1 = self.wread(f)
1612 t1 = self.wread(f)
1626 t2 = self.file(f).read(m2[f])
1613 t2 = self.file(f).read(m2[f])
1627 if cmp(t1, t2) != 0:
1614 if cmp(t1, t2) != 0:
1628 raise util.Abort(_("'%s' already exists in the working"
1615 raise util.Abort(_("'%s' already exists in the working"
1629 " dir and differs from remote") % f)
1616 " dir and differs from remote") % f)
1630
1617
1631 # resolve the manifest to determine which files
1618 # resolve the manifest to determine which files
1632 # we care about merging
1619 # we care about merging
1633 self.ui.note(_("resolving manifests\n"))
1620 self.ui.note(_("resolving manifests\n"))
1634 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1621 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1635 (force, allow, moddirstate, linear_path))
1622 (force, allow, moddirstate, linear_path))
1636 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1623 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1637 (short(man), short(m1n), short(m2n)))
1624 (short(man), short(m1n), short(m2n)))
1638
1625
1639 merge = {}
1626 merge = {}
1640 get = {}
1627 get = {}
1641 remove = []
1628 remove = []
1642
1629
1643 # construct a working dir manifest
1630 # construct a working dir manifest
1644 mw = m1.copy()
1631 mw = m1.copy()
1645 mfw = mf1.copy()
1632 mfw = mf1.copy()
1646 umap = dict.fromkeys(unknown)
1633 umap = dict.fromkeys(unknown)
1647
1634
1648 for f in added + modified + unknown:
1635 for f in added + modified + unknown:
1649 mw[f] = ""
1636 mw[f] = ""
1650 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1637 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1651
1638
1652 if moddirstate and not wlock:
1639 if moddirstate and not wlock:
1653 wlock = self.wlock()
1640 wlock = self.wlock()
1654
1641
1655 for f in deleted + removed:
1642 for f in deleted + removed:
1656 if f in mw:
1643 if f in mw:
1657 del mw[f]
1644 del mw[f]
1658
1645
1659 # If we're jumping between revisions (as opposed to merging),
1646 # If we're jumping between revisions (as opposed to merging),
1660 # and if neither the working directory nor the target rev has
1647 # and if neither the working directory nor the target rev has
1661 # the file, then we need to remove it from the dirstate, to
1648 # the file, then we need to remove it from the dirstate, to
1662 # prevent the dirstate from listing the file when it is no
1649 # prevent the dirstate from listing the file when it is no
1663 # longer in the manifest.
1650 # longer in the manifest.
1664 if moddirstate and linear_path and f not in m2:
1651 if moddirstate and linear_path and f not in m2:
1665 self.dirstate.forget((f,))
1652 self.dirstate.forget((f,))
1666
1653
1667 # Compare manifests
1654 # Compare manifests
1668 for f, n in mw.iteritems():
1655 for f, n in mw.iteritems():
1669 if choose and not choose(f):
1656 if choose and not choose(f):
1670 continue
1657 continue
1671 if f in m2:
1658 if f in m2:
1672 s = 0
1659 s = 0
1673
1660
1674 # is the wfile new since m1, and match m2?
1661 # is the wfile new since m1, and match m2?
1675 if f not in m1:
1662 if f not in m1:
1676 t1 = self.wread(f)
1663 t1 = self.wread(f)
1677 t2 = self.file(f).read(m2[f])
1664 t2 = self.file(f).read(m2[f])
1678 if cmp(t1, t2) == 0:
1665 if cmp(t1, t2) == 0:
1679 n = m2[f]
1666 n = m2[f]
1680 del t1, t2
1667 del t1, t2
1681
1668
1682 # are files different?
1669 # are files different?
1683 if n != m2[f]:
1670 if n != m2[f]:
1684 a = ma.get(f, nullid)
1671 a = ma.get(f, nullid)
1685 # are both different from the ancestor?
1672 # are both different from the ancestor?
1686 if n != a and m2[f] != a:
1673 if n != a and m2[f] != a:
1687 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1674 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1688 # merge executable bits
1675 # merge executable bits
1689 # "if we changed or they changed, change in merge"
1676 # "if we changed or they changed, change in merge"
1690 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1677 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1691 mode = ((a^b) | (a^c)) ^ a
1678 mode = ((a^b) | (a^c)) ^ a
1692 merge[f] = (m1.get(f, nullid), m2[f], mode)
1679 merge[f] = (m1.get(f, nullid), m2[f], mode)
1693 s = 1
1680 s = 1
1694 # are we clobbering?
1681 # are we clobbering?
1695 # is remote's version newer?
1682 # is remote's version newer?
1696 # or are we going back in time?
1683 # or are we going back in time?
1697 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1684 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1698 self.ui.debug(_(" remote %s is newer, get\n") % f)
1685 self.ui.debug(_(" remote %s is newer, get\n") % f)
1699 get[f] = m2[f]
1686 get[f] = m2[f]
1700 s = 1
1687 s = 1
1701 elif f in umap or f in added:
1688 elif f in umap or f in added:
1702 # this unknown file is the same as the checkout
1689 # this unknown file is the same as the checkout
1703 # we need to reset the dirstate if the file was added
1690 # we need to reset the dirstate if the file was added
1704 get[f] = m2[f]
1691 get[f] = m2[f]
1705
1692
1706 if not s and mfw[f] != mf2[f]:
1693 if not s and mfw[f] != mf2[f]:
1707 if force:
1694 if force:
1708 self.ui.debug(_(" updating permissions for %s\n") % f)
1695 self.ui.debug(_(" updating permissions for %s\n") % f)
1709 util.set_exec(self.wjoin(f), mf2[f])
1696 util.set_exec(self.wjoin(f), mf2[f])
1710 else:
1697 else:
1711 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1712 mode = ((a^b) | (a^c)) ^ a
1699 mode = ((a^b) | (a^c)) ^ a
1713 if mode != b:
1700 if mode != b:
1714 self.ui.debug(_(" updating permissions for %s\n")
1701 self.ui.debug(_(" updating permissions for %s\n")
1715 % f)
1702 % f)
1716 util.set_exec(self.wjoin(f), mode)
1703 util.set_exec(self.wjoin(f), mode)
1717 del m2[f]
1704 del m2[f]
1718 elif f in ma:
1705 elif f in ma:
1719 if n != ma[f]:
1706 if n != ma[f]:
1720 r = _("d")
1707 r = _("d")
1721 if not force and (linear_path or allow):
1708 if not force and (linear_path or allow):
1722 r = self.ui.prompt(
1709 r = self.ui.prompt(
1723 (_(" local changed %s which remote deleted\n") % f) +
1710 (_(" local changed %s which remote deleted\n") % f) +
1724 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1711 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1725 if r == _("d"):
1712 if r == _("d"):
1726 remove.append(f)
1713 remove.append(f)
1727 else:
1714 else:
1728 self.ui.debug(_("other deleted %s\n") % f)
1715 self.ui.debug(_("other deleted %s\n") % f)
1729 remove.append(f) # other deleted it
1716 remove.append(f) # other deleted it
1730 else:
1717 else:
1731 # file is created on branch or in working directory
1718 # file is created on branch or in working directory
1732 if force and f not in umap:
1719 if force and f not in umap:
1733 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1720 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1734 remove.append(f)
1721 remove.append(f)
1735 elif n == m1.get(f, nullid): # same as parent
1722 elif n == m1.get(f, nullid): # same as parent
1736 if p2 == pa: # going backwards?
1723 if p2 == pa: # going backwards?
1737 self.ui.debug(_("remote deleted %s\n") % f)
1724 self.ui.debug(_("remote deleted %s\n") % f)
1738 remove.append(f)
1725 remove.append(f)
1739 else:
1726 else:
1740 self.ui.debug(_("local modified %s, keeping\n") % f)
1727 self.ui.debug(_("local modified %s, keeping\n") % f)
1741 else:
1728 else:
1742 self.ui.debug(_("working dir created %s, keeping\n") % f)
1729 self.ui.debug(_("working dir created %s, keeping\n") % f)
1743
1730
1744 for f, n in m2.iteritems():
1731 for f, n in m2.iteritems():
1745 if choose and not choose(f):
1732 if choose and not choose(f):
1746 continue
1733 continue
1747 if f[0] == "/":
1734 if f[0] == "/":
1748 continue
1735 continue
1749 if f in ma and n != ma[f]:
1736 if f in ma and n != ma[f]:
1750 r = _("k")
1737 r = _("k")
1751 if not force and (linear_path or allow):
1738 if not force and (linear_path or allow):
1752 r = self.ui.prompt(
1739 r = self.ui.prompt(
1753 (_("remote changed %s which local deleted\n") % f) +
1740 (_("remote changed %s which local deleted\n") % f) +
1754 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1741 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1755 if r == _("k"):
1742 if r == _("k"):
1756 get[f] = n
1743 get[f] = n
1757 elif f not in ma:
1744 elif f not in ma:
1758 self.ui.debug(_("remote created %s\n") % f)
1745 self.ui.debug(_("remote created %s\n") % f)
1759 get[f] = n
1746 get[f] = n
1760 else:
1747 else:
1761 if force or p2 == pa: # going backwards?
1748 if force or p2 == pa: # going backwards?
1762 self.ui.debug(_("local deleted %s, recreating\n") % f)
1749 self.ui.debug(_("local deleted %s, recreating\n") % f)
1763 get[f] = n
1750 get[f] = n
1764 else:
1751 else:
1765 self.ui.debug(_("local deleted %s\n") % f)
1752 self.ui.debug(_("local deleted %s\n") % f)
1766
1753
1767 del mw, m1, m2, ma
1754 del mw, m1, m2, ma
1768
1755
1769 if force:
1756 if force:
1770 for f in merge:
1757 for f in merge:
1771 get[f] = merge[f][1]
1758 get[f] = merge[f][1]
1772 merge = {}
1759 merge = {}
1773
1760
1774 if linear_path or force:
1761 if linear_path or force:
1775 # we don't need to do any magic, just jump to the new rev
1762 # we don't need to do any magic, just jump to the new rev
1776 branch_merge = False
1763 branch_merge = False
1777 p1, p2 = p2, nullid
1764 p1, p2 = p2, nullid
1778 else:
1765 else:
1779 if not allow:
1766 if not allow:
1780 self.ui.status(_("this update spans a branch"
1767 self.ui.status(_("this update spans a branch"
1781 " affecting the following files:\n"))
1768 " affecting the following files:\n"))
1782 fl = merge.keys() + get.keys()
1769 fl = merge.keys() + get.keys()
1783 fl.sort()
1770 fl.sort()
1784 for f in fl:
1771 for f in fl:
1785 cf = ""
1772 cf = ""
1786 if f in merge:
1773 if f in merge:
1787 cf = _(" (resolve)")
1774 cf = _(" (resolve)")
1788 self.ui.status(" %s%s\n" % (f, cf))
1775 self.ui.status(" %s%s\n" % (f, cf))
1789 self.ui.warn(_("aborting update spanning branches!\n"))
1776 self.ui.warn(_("aborting update spanning branches!\n"))
1790 self.ui.status(_("(use 'hg merge' to merge across branches"
1777 self.ui.status(_("(use 'hg merge' to merge across branches"
1791 " or 'hg update -C' to lose changes)\n"))
1778 " or 'hg update -C' to lose changes)\n"))
1792 return 1
1779 return 1
1793 branch_merge = True
1780 branch_merge = True
1794
1781
1795 xp1 = hex(p1)
1782 xp1 = hex(p1)
1796 xp2 = hex(p2)
1783 xp2 = hex(p2)
1797 if p2 == nullid: xxp2 = ''
1784 if p2 == nullid: xxp2 = ''
1798 else: xxp2 = xp2
1785 else: xxp2 = xp2
1799
1786
1800 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1787 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1801
1788
1802 # get the files we don't need to change
1789 # get the files we don't need to change
1803 files = get.keys()
1790 files = get.keys()
1804 files.sort()
1791 files.sort()
1805 for f in files:
1792 for f in files:
1806 if f[0] == "/":
1793 if f[0] == "/":
1807 continue
1794 continue
1808 self.ui.note(_("getting %s\n") % f)
1795 self.ui.note(_("getting %s\n") % f)
1809 t = self.file(f).read(get[f])
1796 t = self.file(f).read(get[f])
1810 self.wwrite(f, t)
1797 self.wwrite(f, t)
1811 util.set_exec(self.wjoin(f), mf2[f])
1798 util.set_exec(self.wjoin(f), mf2[f])
1812 if moddirstate:
1799 if moddirstate:
1813 if branch_merge:
1800 if branch_merge:
1814 self.dirstate.update([f], 'n', st_mtime=-1)
1801 self.dirstate.update([f], 'n', st_mtime=-1)
1815 else:
1802 else:
1816 self.dirstate.update([f], 'n')
1803 self.dirstate.update([f], 'n')
1817
1804
1818 # merge the tricky bits
1805 # merge the tricky bits
1819 failedmerge = []
1806 failedmerge = []
1820 files = merge.keys()
1807 files = merge.keys()
1821 files.sort()
1808 files.sort()
1822 for f in files:
1809 for f in files:
1823 self.ui.status(_("merging %s\n") % f)
1810 self.ui.status(_("merging %s\n") % f)
1824 my, other, flag = merge[f]
1811 my, other, flag = merge[f]
1825 ret = self.merge3(f, my, other, xp1, xp2)
1812 ret = self.merge3(f, my, other, xp1, xp2)
1826 if ret:
1813 if ret:
1827 err = True
1814 err = True
1828 failedmerge.append(f)
1815 failedmerge.append(f)
1829 util.set_exec(self.wjoin(f), flag)
1816 util.set_exec(self.wjoin(f), flag)
1830 if moddirstate:
1817 if moddirstate:
1831 if branch_merge:
1818 if branch_merge:
1832 # We've done a branch merge, mark this file as merged
1819 # We've done a branch merge, mark this file as merged
1833 # so that we properly record the merger later
1820 # so that we properly record the merger later
1834 self.dirstate.update([f], 'm')
1821 self.dirstate.update([f], 'm')
1835 else:
1822 else:
1836 # We've update-merged a locally modified file, so
1823 # We've update-merged a locally modified file, so
1837 # we set the dirstate to emulate a normal checkout
1824 # we set the dirstate to emulate a normal checkout
1838 # of that file some time in the past. Thus our
1825 # of that file some time in the past. Thus our
1839 # merge will appear as a normal local file
1826 # merge will appear as a normal local file
1840 # modification.
1827 # modification.
1841 f_len = len(self.file(f).read(other))
1828 f_len = len(self.file(f).read(other))
1842 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1829 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1843
1830
1844 remove.sort()
1831 remove.sort()
1845 for f in remove:
1832 for f in remove:
1846 self.ui.note(_("removing %s\n") % f)
1833 self.ui.note(_("removing %s\n") % f)
1847 util.audit_path(f)
1834 util.audit_path(f)
1848 try:
1835 try:
1849 util.unlink(self.wjoin(f))
1836 util.unlink(self.wjoin(f))
1850 except OSError, inst:
1837 except OSError, inst:
1851 if inst.errno != errno.ENOENT:
1838 if inst.errno != errno.ENOENT:
1852 self.ui.warn(_("update failed to remove %s: %s!\n") %
1839 self.ui.warn(_("update failed to remove %s: %s!\n") %
1853 (f, inst.strerror))
1840 (f, inst.strerror))
1854 if moddirstate:
1841 if moddirstate:
1855 if branch_merge:
1842 if branch_merge:
1856 self.dirstate.update(remove, 'r')
1843 self.dirstate.update(remove, 'r')
1857 else:
1844 else:
1858 self.dirstate.forget(remove)
1845 self.dirstate.forget(remove)
1859
1846
1860 if moddirstate:
1847 if moddirstate:
1861 self.dirstate.setparents(p1, p2)
1848 self.dirstate.setparents(p1, p2)
1862
1849
1863 if show_stats:
1850 if show_stats:
1864 stats = ((len(get), _("updated")),
1851 stats = ((len(get), _("updated")),
1865 (len(merge) - len(failedmerge), _("merged")),
1852 (len(merge) - len(failedmerge), _("merged")),
1866 (len(remove), _("removed")),
1853 (len(remove), _("removed")),
1867 (len(failedmerge), _("unresolved")))
1854 (len(failedmerge), _("unresolved")))
1868 note = ", ".join([_("%d files %s") % s for s in stats])
1855 note = ", ".join([_("%d files %s") % s for s in stats])
1869 self.ui.status("%s\n" % note)
1856 self.ui.status("%s\n" % note)
1870 if moddirstate:
1857 if moddirstate:
1871 if branch_merge:
1858 if branch_merge:
1872 if failedmerge:
1859 if failedmerge:
1873 self.ui.status(_("There are unresolved merges,"
1860 self.ui.status(_("There are unresolved merges,"
1874 " you can redo the full merge using:\n"
1861 " you can redo the full merge using:\n"
1875 " hg update -C %s\n"
1862 " hg update -C %s\n"
1876 " hg merge %s\n"
1863 " hg merge %s\n"
1877 % (self.changelog.rev(p1),
1864 % (self.changelog.rev(p1),
1878 self.changelog.rev(p2))))
1865 self.changelog.rev(p2))))
1879 else:
1866 else:
1880 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1867 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1881 elif failedmerge:
1868 elif failedmerge:
1882 self.ui.status(_("There are unresolved merges with"
1869 self.ui.status(_("There are unresolved merges with"
1883 " locally modified files.\n"))
1870 " locally modified files.\n"))
1884
1871
1885 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1872 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1886 return err
1873 return err
1887
1874
1888 def merge3(self, fn, my, other, p1, p2):
1875 def merge3(self, fn, my, other, p1, p2):
1889 """perform a 3-way merge in the working directory"""
1876 """perform a 3-way merge in the working directory"""
1890
1877
1891 def temp(prefix, node):
1878 def temp(prefix, node):
1892 pre = "%s~%s." % (os.path.basename(fn), prefix)
1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1893 (fd, name) = tempfile.mkstemp(prefix=pre)
1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1894 f = os.fdopen(fd, "wb")
1881 f = os.fdopen(fd, "wb")
1895 self.wwrite(fn, fl.read(node), f)
1882 self.wwrite(fn, fl.read(node), f)
1896 f.close()
1883 f.close()
1897 return name
1884 return name
1898
1885
1899 fl = self.file(fn)
1886 fl = self.file(fn)
1900 base = fl.ancestor(my, other)
1887 base = fl.ancestor(my, other)
1901 a = self.wjoin(fn)
1888 a = self.wjoin(fn)
1902 b = temp("base", base)
1889 b = temp("base", base)
1903 c = temp("other", other)
1890 c = temp("other", other)
1904
1891
1905 self.ui.note(_("resolving %s\n") % fn)
1892 self.ui.note(_("resolving %s\n") % fn)
1906 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1907 (fn, short(my), short(other), short(base)))
1894 (fn, short(my), short(other), short(base)))
1908
1895
1909 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1910 or "hgmerge")
1897 or "hgmerge")
1911 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1912 environ={'HG_FILE': fn,
1899 environ={'HG_FILE': fn,
1913 'HG_MY_NODE': p1,
1900 'HG_MY_NODE': p1,
1914 'HG_OTHER_NODE': p2,
1901 'HG_OTHER_NODE': p2,
1915 'HG_FILE_MY_NODE': hex(my),
1902 'HG_FILE_MY_NODE': hex(my),
1916 'HG_FILE_OTHER_NODE': hex(other),
1903 'HG_FILE_OTHER_NODE': hex(other),
1917 'HG_FILE_BASE_NODE': hex(base)})
1904 'HG_FILE_BASE_NODE': hex(base)})
1918 if r:
1905 if r:
1919 self.ui.warn(_("merging %s failed!\n") % fn)
1906 self.ui.warn(_("merging %s failed!\n") % fn)
1920
1907
1921 os.unlink(b)
1908 os.unlink(b)
1922 os.unlink(c)
1909 os.unlink(c)
1923 return r
1910 return r
1924
1911
1925 def verify(self):
1912 def verify(self):
1926 filelinkrevs = {}
1913 filelinkrevs = {}
1927 filenodes = {}
1914 filenodes = {}
1928 changesets = revisions = files = 0
1915 changesets = revisions = files = 0
1929 errors = [0]
1916 errors = [0]
1930 warnings = [0]
1917 warnings = [0]
1931 neededmanifests = {}
1918 neededmanifests = {}
1932
1919
1933 def err(msg):
1920 def err(msg):
1934 self.ui.warn(msg + "\n")
1921 self.ui.warn(msg + "\n")
1935 errors[0] += 1
1922 errors[0] += 1
1936
1923
1937 def warn(msg):
1924 def warn(msg):
1938 self.ui.warn(msg + "\n")
1925 self.ui.warn(msg + "\n")
1939 warnings[0] += 1
1926 warnings[0] += 1
1940
1927
1941 def checksize(obj, name):
1928 def checksize(obj, name):
1942 d = obj.checksize()
1929 d = obj.checksize()
1943 if d[0]:
1930 if d[0]:
1944 err(_("%s data length off by %d bytes") % (name, d[0]))
1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1945 if d[1]:
1932 if d[1]:
1946 err(_("%s index contains %d extra bytes") % (name, d[1]))
1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1947
1934
1948 def checkversion(obj, name):
1935 def checkversion(obj, name):
1949 if obj.version != revlog.REVLOGV0:
1936 if obj.version != revlog.REVLOGV0:
1950 if not revlogv1:
1937 if not revlogv1:
1951 warn(_("warning: `%s' uses revlog format 1") % name)
1938 warn(_("warning: `%s' uses revlog format 1") % name)
1952 elif revlogv1:
1939 elif revlogv1:
1953 warn(_("warning: `%s' uses revlog format 0") % name)
1940 warn(_("warning: `%s' uses revlog format 0") % name)
1954
1941
1955 revlogv1 = self.revlogversion != revlog.REVLOGV0
1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1956 if self.ui.verbose or revlogv1 != self.revlogv1:
1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1957 self.ui.status(_("repository uses revlog format %d\n") %
1944 self.ui.status(_("repository uses revlog format %d\n") %
1958 (revlogv1 and 1 or 0))
1945 (revlogv1 and 1 or 0))
1959
1946
1960 seen = {}
1947 seen = {}
1961 self.ui.status(_("checking changesets\n"))
1948 self.ui.status(_("checking changesets\n"))
1962 checksize(self.changelog, "changelog")
1949 checksize(self.changelog, "changelog")
1963
1950
1964 for i in range(self.changelog.count()):
1951 for i in range(self.changelog.count()):
1965 changesets += 1
1952 changesets += 1
1966 n = self.changelog.node(i)
1953 n = self.changelog.node(i)
1967 l = self.changelog.linkrev(n)
1954 l = self.changelog.linkrev(n)
1968 if l != i:
1955 if l != i:
1969 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1970 if n in seen:
1957 if n in seen:
1971 err(_("duplicate changeset at revision %d") % i)
1958 err(_("duplicate changeset at revision %d") % i)
1972 seen[n] = 1
1959 seen[n] = 1
1973
1960
1974 for p in self.changelog.parents(n):
1961 for p in self.changelog.parents(n):
1975 if p not in self.changelog.nodemap:
1962 if p not in self.changelog.nodemap:
1976 err(_("changeset %s has unknown parent %s") %
1963 err(_("changeset %s has unknown parent %s") %
1977 (short(n), short(p)))
1964 (short(n), short(p)))
1978 try:
1965 try:
1979 changes = self.changelog.read(n)
1966 changes = self.changelog.read(n)
1980 except KeyboardInterrupt:
1967 except KeyboardInterrupt:
1981 self.ui.warn(_("interrupted"))
1968 self.ui.warn(_("interrupted"))
1982 raise
1969 raise
1983 except Exception, inst:
1970 except Exception, inst:
1984 err(_("unpacking changeset %s: %s") % (short(n), inst))
1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1985 continue
1972 continue
1986
1973
1987 neededmanifests[changes[0]] = n
1974 neededmanifests[changes[0]] = n
1988
1975
1989 for f in changes[3]:
1976 for f in changes[3]:
1990 filelinkrevs.setdefault(f, []).append(i)
1977 filelinkrevs.setdefault(f, []).append(i)
1991
1978
1992 seen = {}
1979 seen = {}
1993 self.ui.status(_("checking manifests\n"))
1980 self.ui.status(_("checking manifests\n"))
1994 checkversion(self.manifest, "manifest")
1981 checkversion(self.manifest, "manifest")
1995 checksize(self.manifest, "manifest")
1982 checksize(self.manifest, "manifest")
1996
1983
1997 for i in range(self.manifest.count()):
1984 for i in range(self.manifest.count()):
1998 n = self.manifest.node(i)
1985 n = self.manifest.node(i)
1999 l = self.manifest.linkrev(n)
1986 l = self.manifest.linkrev(n)
2000
1987
2001 if l < 0 or l >= self.changelog.count():
1988 if l < 0 or l >= self.changelog.count():
2002 err(_("bad manifest link (%d) at revision %d") % (l, i))
1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
2003
1990
2004 if n in neededmanifests:
1991 if n in neededmanifests:
2005 del neededmanifests[n]
1992 del neededmanifests[n]
2006
1993
2007 if n in seen:
1994 if n in seen:
2008 err(_("duplicate manifest at revision %d") % i)
1995 err(_("duplicate manifest at revision %d") % i)
2009
1996
2010 seen[n] = 1
1997 seen[n] = 1
2011
1998
2012 for p in self.manifest.parents(n):
1999 for p in self.manifest.parents(n):
2013 if p not in self.manifest.nodemap:
2000 if p not in self.manifest.nodemap:
2014 err(_("manifest %s has unknown parent %s") %
2001 err(_("manifest %s has unknown parent %s") %
2015 (short(n), short(p)))
2002 (short(n), short(p)))
2016
2003
2017 try:
2004 try:
2018 delta = mdiff.patchtext(self.manifest.delta(n))
2005 delta = mdiff.patchtext(self.manifest.delta(n))
2019 except KeyboardInterrupt:
2006 except KeyboardInterrupt:
2020 self.ui.warn(_("interrupted"))
2007 self.ui.warn(_("interrupted"))
2021 raise
2008 raise
2022 except Exception, inst:
2009 except Exception, inst:
2023 err(_("unpacking manifest %s: %s") % (short(n), inst))
2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2024 continue
2011 continue
2025
2012
2026 try:
2013 try:
2027 ff = [ l.split('\0') for l in delta.splitlines() ]
2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2028 for f, fn in ff:
2015 for f, fn in ff:
2029 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2030 except (ValueError, TypeError), inst:
2017 except (ValueError, TypeError), inst:
2031 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2032
2019
2033 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2034
2021
2035 for m, c in neededmanifests.items():
2022 for m, c in neededmanifests.items():
2036 err(_("Changeset %s refers to unknown manifest %s") %
2023 err(_("Changeset %s refers to unknown manifest %s") %
2037 (short(m), short(c)))
2024 (short(m), short(c)))
2038 del neededmanifests
2025 del neededmanifests
2039
2026
2040 for f in filenodes:
2027 for f in filenodes:
2041 if f not in filelinkrevs:
2028 if f not in filelinkrevs:
2042 err(_("file %s in manifest but not in changesets") % f)
2029 err(_("file %s in manifest but not in changesets") % f)
2043
2030
2044 for f in filelinkrevs:
2031 for f in filelinkrevs:
2045 if f not in filenodes:
2032 if f not in filenodes:
2046 err(_("file %s in changeset but not in manifest") % f)
2033 err(_("file %s in changeset but not in manifest") % f)
2047
2034
2048 self.ui.status(_("checking files\n"))
2035 self.ui.status(_("checking files\n"))
2049 ff = filenodes.keys()
2036 ff = filenodes.keys()
2050 ff.sort()
2037 ff.sort()
2051 for f in ff:
2038 for f in ff:
2052 if f == "/dev/null":
2039 if f == "/dev/null":
2053 continue
2040 continue
2054 files += 1
2041 files += 1
2055 if not f:
2042 if not f:
2056 err(_("file without name in manifest %s") % short(n))
2043 err(_("file without name in manifest %s") % short(n))
2057 continue
2044 continue
2058 fl = self.file(f)
2045 fl = self.file(f)
2059 checkversion(fl, f)
2046 checkversion(fl, f)
2060 checksize(fl, f)
2047 checksize(fl, f)
2061
2048
2062 nodes = {nullid: 1}
2049 nodes = {nullid: 1}
2063 seen = {}
2050 seen = {}
2064 for i in range(fl.count()):
2051 for i in range(fl.count()):
2065 revisions += 1
2052 revisions += 1
2066 n = fl.node(i)
2053 n = fl.node(i)
2067
2054
2068 if n in seen:
2055 if n in seen:
2069 err(_("%s: duplicate revision %d") % (f, i))
2056 err(_("%s: duplicate revision %d") % (f, i))
2070 if n not in filenodes[f]:
2057 if n not in filenodes[f]:
2071 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2072 else:
2059 else:
2073 del filenodes[f][n]
2060 del filenodes[f][n]
2074
2061
2075 flr = fl.linkrev(n)
2062 flr = fl.linkrev(n)
2076 if flr not in filelinkrevs.get(f, []):
2063 if flr not in filelinkrevs.get(f, []):
2077 err(_("%s:%s points to unexpected changeset %d")
2064 err(_("%s:%s points to unexpected changeset %d")
2078 % (f, short(n), flr))
2065 % (f, short(n), flr))
2079 else:
2066 else:
2080 filelinkrevs[f].remove(flr)
2067 filelinkrevs[f].remove(flr)
2081
2068
2082 # verify contents
2069 # verify contents
2083 try:
2070 try:
2084 t = fl.read(n)
2071 t = fl.read(n)
2085 except KeyboardInterrupt:
2072 except KeyboardInterrupt:
2086 self.ui.warn(_("interrupted"))
2073 self.ui.warn(_("interrupted"))
2087 raise
2074 raise
2088 except Exception, inst:
2075 except Exception, inst:
2089 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2090
2077
2091 # verify parents
2078 # verify parents
2092 (p1, p2) = fl.parents(n)
2079 (p1, p2) = fl.parents(n)
2093 if p1 not in nodes:
2080 if p1 not in nodes:
2094 err(_("file %s:%s unknown parent 1 %s") %
2081 err(_("file %s:%s unknown parent 1 %s") %
2095 (f, short(n), short(p1)))
2082 (f, short(n), short(p1)))
2096 if p2 not in nodes:
2083 if p2 not in nodes:
2097 err(_("file %s:%s unknown parent 2 %s") %
2084 err(_("file %s:%s unknown parent 2 %s") %
2098 (f, short(n), short(p1)))
2085 (f, short(n), short(p1)))
2099 nodes[n] = 1
2086 nodes[n] = 1
2100
2087
2101 # cross-check
2088 # cross-check
2102 for node in filenodes[f]:
2089 for node in filenodes[f]:
2103 err(_("node %s in manifests not in %s") % (hex(node), f))
2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2104
2091
2105 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2106 (files, changesets, revisions))
2093 (files, changesets, revisions))
2107
2094
2108 if warnings[0]:
2095 if warnings[0]:
2109 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2110 if errors[0]:
2097 if errors[0]:
2111 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2112 return 1
2099 return 1
2113
2100
2114 # used to avoid circular references so destructors work
2101 # used to avoid circular references so destructors work
2115 def aftertrans(base):
2102 def aftertrans(base):
2116 p = base
2103 p = base
2117 def a():
2104 def a():
2118 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2105 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2119 util.rename(os.path.join(p, "journal.dirstate"),
2106 util.rename(os.path.join(p, "journal.dirstate"),
2120 os.path.join(p, "undo.dirstate"))
2107 os.path.join(p, "undo.dirstate"))
2121 return a
2108 return a
2122
2109
General Comments 0
You need to be logged in to leave comments. Login now