merge: handle directory renames...
Matt Mackall
r3733:9e67fecb default
@@ -0,0 +1,32 b''
#!/bin/sh

mkdir t
cd t
hg init

mkdir a
echo foo > a/a
echo bar > a/b

hg add a
hg ci -m "0" -d "0 0"

hg co -C 0
hg mv a b
hg ci -m "1 mv a/ b/" -d "0 0"

hg co -C 0
echo baz > a/c
hg add a/c
hg ci -m "2 add a/c" -d "0 0"

hg merge --debug 1
ls a/ b/
hg st -C
hg ci -m "3 merge 2+1" -d "0 0"

hg co -C 1
hg merge --debug 2
ls a/ b/
hg st -C
hg ci -m "4 merge 1+2" -d "0 0"
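
# Note (not part of the original test): the two merges above exercise the
# directory-rename handling this changeset adds. Revision 1 renames a/ to
# b/, while revision 2 adds a/c on the other branch. In either merge order
# the expectation is that a/c follows the rename and ends up as b/c, with
# "hg st -C" reporting it as a copy.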
@@ -1,1897 +1,1899 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import gettext as _
from demandload import *
import repo
demandload(globals(), "appendfile changegroup")
demandload(globals(), "changelog dirstate filelog manifest context")
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
demandload(globals(), "os revlog time util")

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just use the version from the changelog.
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r

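    # Illustrative note (not part of the original source): hooks come from
    # the [hooks] section of hgrc; a value starting with "python:" is
    # dispatched to callhook(), anything else to runhook() as a shell
    # command. A minimal python hook matching the calling convention above
    # might be (assuming a module "myhooks" on the import path):
    #
    #   def check(ui, repo, hooktype, **kwargs):
    #       ui.status("running %s hook\n" % hooktype)
    #       return False   # falsy means the hook passed
    #
    # enabled with:
    #   [hooks]
    #   pretag.check = python:myhooks.check
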
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = line.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: cannot parse entry\n") % context)
                    return
                node, key = s
                key = key.strip()
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                 (context, node))
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                 (context, key))
                    return
                self.tagscache[key] = bin_n

            # read the tags file from each head, ending with the tip,
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            f = None
            for rev, node, fnode in self._hgtagsnodes():
                f = (f and f.filectx(fnode) or
                     self.filectx('.hgtags', fileid=fnode))
                count = 0
                for l in f.data().splitlines():
                    count += 1
                    parsetag(l, _("%s, line %d") % (str(f), count))

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    count += 1
                    parsetag(l, _("localtags, line %d") % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

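    # Illustrative note (not part of the original source): each line of
    # .hgtags and of the localtags file is "<hex node> <tag name>",
    # split on the first space by parsetag() above, e.g. something like:
    #
    #   0123456789abcdef0123456789abcdef01234567 release-1.0
    #
    # Malformed nodes and nodes missing from the changelog are warned
    # about and skipped rather than aborting the whole tags read.
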
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def branchtags(self):
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        self.branchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                for l in lines:
                    if not l: continue
                    node, label = l.rstrip().split(" ", 1)
                    partial[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, nullrev
        except IOError:
            last, lrev = nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()

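    # Illustrative note (not part of the original source): branches.cache
    # holds one header line "<tip hex> <tip rev>" followed by one
    # "<node hex> <branch label>" line per branch, as written by
    # _writebranchcache() above. _readbranchcache() treats the cache as
    # stale whenever the recorded tip no longer matches the changelog, and
    # _updatebranchcache() then replays only the missing revisions.
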
    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

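    # Illustrative note (not part of the original source): lock() guards the
    # store and triggers reload() on acquisition, while wlock() guards the
    # working directory, re-reading the dirstate on acquisition and writing
    # it back on release. Callers that need both (e.g. rollback() and
    # commit() in this file) take the wlock first, then the lock.
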
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
-           else: # copied on local side, reversed
+           elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
+           else: # directory rename
+               meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

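    # Illustrative note (not part of the original source): the change above
    # adds a fourth copy case to filecommit(). When a merge moves a file
    # into a renamed directory (e.g. a/c becoming b/c in the new test), the
    # new name exists in neither parent manifest, so fp1 == fp2 == nullid;
    # the final else branch records the copy source's revision from
    # manifest1 instead of failing on the reversed-copy lookup.
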
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "")
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

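    # Illustrative note (not part of the original source): commit() fires
    # three hooks in order: "precommit" before anything is written,
    # "pretxncommit" once the changeset exists but before the transaction
    # closes (so a failing hook rolls the commit back), and "commit" after
    # everything has been committed and the dirstate updated.
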
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

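    # Illustrative note (not part of the original source): walk() is a
    # generator, so a typical caller iterates it directly, e.g.
    #
    #   for src, fn in repo.walk(files=['a'], match=util.always):
    #       ...
    #
    # acting on 'f'/'m' results and reporting 'b' (badmatch) ones.
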
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

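    # Illustrative note (not part of the original source): callers unpack
    # the seven sorted lists positionally, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_clean=True)
    #
    # with ignored and clean left empty unless the corresponding list_*
    # flag was passed (tag() above slices off just the first five).
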
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

923 def heads(self, start=None):
925 def heads(self, start=None):
924 heads = self.changelog.heads(start)
926 heads = self.changelog.heads(start)
925 # sort the output in rev descending order
927 # sort the output in rev descending order
926 heads = [(-self.changelog.rev(h), h) for h in heads]
928 heads = [(-self.changelog.rev(h), h) for h in heads]
927 heads.sort()
929 heads.sort()
928 return [n for (r, n) in heads]
930 return [n for (r, n) in heads]
929
931
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [h for h in heads]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

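    # Illustrative sketch (not part of the original module): the
    # elimination pass above keeps, per head, only those tags that are
    # not visible from (reachable via) another tag recorded for the same
    # head.  The same rule in miniature, with a hypothetical reachability
    # map instead of real changelog nodes:
    #
    #   >>> reach = {'2.6.13': ['2.6.12']}  # 2.6.12 is behind 2.6.13
    #   >>> cand = ['2.6.12', '2.6.13']
    #   >>> kept = []
    #   >>> for t in cand:
    #   ...     others = [o for o in cand if o != t]
    #   ...     if not [o for o in others if t in reach.get(o, [])]:
    #   ...         kept.append(t)
    #   >>> kept
    #   ['2.6.13']
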
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

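    # Illustrative sketch (not part of the original module): for a linear
    # range, between() samples first parents at distances 1, 2, 4, 8, ...
    # below the top, which is what lets the incoming search below narrow
    # a branch range in a logarithmic number of round trips.  Modelled on
    # plain integers, where the "first parent" of n is n - 1:
    #
    #   >>> top, bottom = 16, 0
    #   >>> n, l, i, f = top, [], 0, 1
    #   >>> while n != bottom:
    #   ...     if i == f:
    #   ...         l.append(n)
    #   ...         f = f * 2
    #   ...     n = n - 1
    #   ...     i = i + 1
    #   >>> l
    #   [15, 14, 12, 8]
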
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children do.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:  # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m:  # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n)  # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1  # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1  # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

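    # Illustrative sketch (not part of the original module): the narrowing
    # loop above walks the between() samples from the unknown head toward
    # the known root and stops at the first node the local nodemap knows,
    # shrinking the search to the last unknown/known sample pair.  On
    # integers, with everything below 10 known locally:
    #
    #   >>> known = lambda x: x < 10
    #   >>> samples = [15, 14, 12, 8]   # from head 16: distances 1, 2, 4, 8
    #   >>> p = 16
    #   >>> for i in samples:
    #   ...     if known(i):
    #   ...         print 'narrowed to %d:%d' % (p, i)
    #   ...         break
    #   ...     p = i
    #   narrowed to 12:8
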
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

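    # Illustrative sketch (not part of the original module): the pruning
    # walk above deletes everything reachable from base out of remain;
    # whatever survives with no surviving parent is a root of the outgoing
    # set.  The same walk on hypothetical nodes with a toy parent map:
    #
    #   >>> parents = {'c': ['b'], 'b': ['a'], 'a': []}
    #   >>> remain = {'a': None, 'b': None, 'c': None}
    #   >>> remove = ['b']              # base: remote already has b
    #   >>> while remove:
    #   ...     n = remove.pop(0)
    #   ...     if n in remain:
    #   ...         del remain[n]
    #   ...         remove.extend(parents.get(n, []))
    #   >>> remain.keys()
    #   ['c']
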
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

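    # Illustrative sketch (not part of the original module): pull()
    # chooses its wire call from two inputs -- whether specific heads were
    # requested, and whether the remote advertises the 'changegroupsubset'
    # capability.  A hypothetical decision table:
    #
    #   >>> def choose(heads, caps):
    #   ...     if heads is None:
    #   ...         return 'changegroup'
    #   ...     if 'changegroupsubset' not in caps:
    #   ...         return 'abort'
    #   ...     return 'changegroupsubset'
    #   >>> choose(None, ())
    #   'changegroup'
    #   >>> choose(['somehead'], ())
    #   'abort'
    #   >>> choose(['somehead'], ('changegroupsubset',))
    #   'changegroupsubset'
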
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

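    # Illustrative sketch (not part of the original module): the head
    # check above counts how many heads the remote would end up with.
    # Each remote head either survives (no outgoing head descends from
    # it) or is replaced by the outgoing heads that do; if the final
    # count grows, the push is creating a new remote branch head.  A
    # hypothetical miniature:
    #
    #   >>> remote_heads = ['r1']
    #   >>> outgoing_heads = ['l1', 'l2']   # both descend from r1
    #   >>> descends = {('l1', 'r1'): True, ('l2', 'r1'): True}
    #   >>> newheads = list(outgoing_heads)
    #   >>> for r in remote_heads:
    #   ...     if not [h for h in outgoing_heads if descends.get((h, r))]:
    #   ...         newheads.append(r)
    #   >>> len(newheads) > len(remote_heads)  # two heads where one was
    #   True
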
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1]  # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by, so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

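    # Illustrative sketch (not part of the original module): gengroup()
    # above emits one logical stream in a fixed order -- changelog group,
    # manifest group, then for each changed file a name chunk followed by
    # its filenode group, and an empty chunk as terminator.  In outline,
    # with hypothetical chunk lists standing in for real revlog data:
    #
    #   >>> def layout(clchunks, mnchunks, filegroups):
    #   ...     out = []
    #   ...     out.extend(clchunks)       # changesets first
    #   ...     out.extend(mnchunks)       # then manifests
    #   ...     for fname, chunks in filegroups:
    #   ...         out.append(fname)      # file name announces a group
    #   ...         out.extend(chunks)
    #   ...     out.append('')             # closechunk: end of stream
    #   ...     return out
    #   >>> layout(['c1'], ['m1'], [('foo', ['f1'])])
    #   ['c1', 'm1', 'foo', 'f1', '']
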
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1

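    # Illustrative sketch (not part of the original module): the return
    # value above encodes "number of heads modified or added, plus one",
    # so 0 still means no changes were added and 1 means changes that
    # created or removed no head.  E.g. a pull that turns 1 head into 3:
    #
    #   >>> oldheads, newheads = 1, 3
    #   >>> newheads - oldheads + 1
    #   3
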
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # avoid division by zero on very fast transfers
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

1865 def clone(self, remote, heads=[], stream=False):
1867 def clone(self, remote, heads=[], stream=False):
1866 '''clone remote repository.
1868 '''clone remote repository.
1867
1869
1868 keyword arguments:
1870 keyword arguments:
1869 heads: list of revs to clone (forces use of pull)
1871 heads: list of revs to clone (forces use of pull)
1870 stream: use streaming clone if possible'''
1872 stream: use streaming clone if possible'''
1871
1873
1872 # now, all clients that can request uncompressed clones can
1874 # now, all clients that can request uncompressed clones can
1873 # read repo formats supported by all servers that can serve
1875 # read repo formats supported by all servers that can serve
1874 # them.
1876 # them.
1875
1877
1876 # if revlog format changes, client will have to check version
1878 # if revlog format changes, client will have to check version
1877 # and format flags on "stream" capability, and use
1879 # and format flags on "stream" capability, and use
1878 # uncompressed only if compatible.
1880 # uncompressed only if compatible.
1879
1881
1880 if stream and not heads and remote.capable('stream'):
1882 if stream and not heads and remote.capable('stream'):
1881 return self.stream_in(remote)
1883 return self.stream_in(remote)
1882 return self.pull(remote, heads)
1884 return self.pull(remote, heads)
1883
1885
1884 # used to avoid circular references so destructors work
1886 # used to avoid circular references so destructors work
1885 def aftertrans(base):
1887 def aftertrans(base):
1886 p = base
1888 p = base
1887 def a():
1889 def a():
1888 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1890 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1889 util.rename(os.path.join(p, "journal.dirstate"),
1891 util.rename(os.path.join(p, "journal.dirstate"),
1890 os.path.join(p, "undo.dirstate"))
1892 os.path.join(p, "undo.dirstate"))
1891 return a
1893 return a
1892
1894
1893 def instance(ui, path, create):
1895 def instance(ui, path, create):
1894 return localrepository(ui, util.drop_scheme('file', path), create)
1896 return localrepository(ui, util.drop_scheme('file', path), create)
1895
1897
1896 def islocal(path):
1898 def islocal(path):
1897 return True
1899 return True
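A note on the stream clone wire format parsed by stream_in above: the server sends a one-line status code (0 = OK, 1 = forbidden, 2 = lock failed), then a "total_files total_bytes" line, then, per file, a "name\0size" line followed by exactly size raw bytes. A fabricated payload in that framing, for illustration only (names and sizes invented, not taken from a real server):

    # Illustration only, not part of this changeset. The literals are
    # split so "\0" stays a NUL byte instead of forming an octal escape.
    payload = (
        "0\n"                    # status line: 0 means OK
        "2 23\n"                 # total_files total_bytes (23 = 11 + 12)
        "data/a.i\0" + "11\n"    # per-file header: name, NUL, size
        + "x" * 11               # exactly `size` raw bytes follow
        + "data/b.i\0" + "12\n"
        + "y" * 12
    )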
@@ -1,408 +1,483 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import gettext as _
from demandload import *
demandload(globals(), "errno util os tempfile")

def filemerge(repo, fw, fo, wctx, mctx):
    """perform a 3-way merge in the working directory

    fw = filename in the working directory
    fo = filename in other parent
    wctx, mctx = working and merge changecontexts
    """

    def temp(prefix, ctx):
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        f = os.fdopen(fd, "wb")
        repo.wwrite(ctx.path(), ctx.data(), f)
        f.close()
        return name

    fcm = wctx.filectx(fw)
    fco = mctx.filectx(fo)

    if not fco.cmp(fcm.data()): # files identical?
        return None

    fca = fcm.ancestor(fco)
    if not fca:
        fca = repo.filectx(fw, fileid=nullrev)
    a = repo.wjoin(fw)
    b = temp("base", fca)
    c = temp("other", fco)

    if fw != fo:
        repo.ui.status(_("merging %s and %s\n") % (fw, fo))
    else:
        repo.ui.status(_("merging %s\n") % fw)

    repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))

    cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
           or "hgmerge")
    r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
                    environ={'HG_FILE': fw,
                             'HG_MY_NODE': str(wctx.parents()[0]),
                             'HG_OTHER_NODE': str(mctx)})
    if r:
        repo.ui.warn(_("merging %s failed!\n") % fw)

    os.unlink(b)
    os.unlink(c)
    return r

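filemerge shells out to an external tool, and both the lookup order and the calling convention are easy to miss in the util.system() call above. A small sketch of just the selection logic (ui_merge is a stand-in for repo.ui.config("ui", "merge"); not hg API):

    import os

    # Sketch of the merge-tool selection used by filemerge above.
    def pick_merge_tool(ui_merge=None):
        # precedence: $HGMERGE, then ui.merge from hgrc, then "hgmerge"
        return os.environ.get("HGMERGE") or ui_merge or "hgmerge"

    # The chosen tool runs as:  tool <local> <base> <other>
    # with HG_FILE, HG_MY_NODE and HG_OTHER_NODE in the environment;
    # a nonzero exit status marks the file unresolved.
    print(pick_merge_tool())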
def checkunknown(wctx, mctx):
    "check for collisions between unknown files and files in mctx"
    man = mctx.manifest()
    for f in wctx.unknown():
        if f in man:
            if mctx.filectx(f).cmp(wctx.filectx(f).data()):
                raise util.Abort(_("untracked local file '%s' differs"
                                   " from remote version") % f)

def forgetremoved(wctx, mctx):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.
    """

    action = []
    man = mctx.manifest()
    for f in wctx.deleted() + wctx.removed():
        if f not in man:
            action.append((f, "f"))

    return action

def findcopies(repo, m1, m2, ma, limit):
    """
    Find moves and copies between m1 and m2 back to limit linkrev
    """

    def findold(fctx):
        "find files that path was copied from, back to linkrev limit"
        old = {}
        orig = fctx.path()
        visit = [fctx]
        while visit:
            fc = visit.pop()
            if fc.rev() < limit:
                continue
            if fc.path() != orig and fc.path() not in old:
                old[fc.path()] = 1
            visit += fc.parents()

        old = old.keys()
        old.sort()
        return old

    def nonoverlap(d1, d2, d3):
        "Return list of elements in d1 not in d2 or d3"
        l = [d for d in d1 if d not in d3 and d not in d2]
        l.sort()
        return l

    def checkcopies(c, man):
        '''check possible copies for filectx c'''
        for of in findold(c):
            if of not in man:
                return
            c2 = ctx(of, man[of])
            ca = c.ancestor(c2)
-            if not ca or c == ca or c2 == ca:
+            if not ca: # unrelated
                return
            if ca.path() == c.path() or ca.path() == c2.path():
+                fullcopy[c.path()] = of
+                if c == ca or c2 == ca: # no merge needed, ignore copy
+                    return
                copy[c.path()] = of

+    def dirs(files):
+        d = {}
+        for f in files:
+            d[os.path.dirname(f)] = True
+        return d
+
    if not repo.ui.configbool("merge", "followcopies", True):
        return {}

    # avoid silly behavior for update from empty dir
    if not m1 or not m2 or not ma:
        return {}

    dcopies = repo.dirstate.copies()
    copy = {}
+    fullcopy = {}
    u1 = nonoverlap(m1, m2, ma)
    u2 = nonoverlap(m2, m1, ma)
    ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))

    for f in u1:
        checkcopies(ctx(dcopies.get(f, f), m1[f]), m2)

    for f in u2:
        checkcopies(ctx(f, m2[f]), m1)

+    if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
+        return copy
+
+    # generate a directory move map
+    d1, d2 = dirs(m1), dirs(m2)
+    invalid = {}
+    dirmove = {}
+
+    for dst, src in fullcopy.items():
+        dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
+        if dsrc in invalid:
+            continue
+        elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
+            invalid[dsrc] = True
+        elif dsrc in dirmove and dirmove[dsrc] != ddst:
+            invalid[dsrc] = True
+            del dirmove[dsrc]
+        else:
+            dirmove[dsrc] = ddst
+
+    del d1, d2, invalid
+
+    if not dirmove:
+        return copy
+
+    # check unaccounted nonoverlapping files
+    for f in u1 + u2:
+        if f not in fullcopy:
+            d = os.path.dirname(f)
+            if d in dirmove:
+                copy[f] = dirmove[d] + "/" + os.path.basename(f)
+
    return copy

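The new directory-rename detection boils down to the dirmove loop added above. A standalone sketch on toy data (invented paths, plain lists and dicts instead of manifests) shows the two invalidation rules: a source directory that still exists on either side is not a rename, and a source directory copied to two different targets is ambiguous:

    import os

    # Toy re-run of the dirmove computation added above; not hg API.
    def dirs(files):
        d = {}
        for f in files:
            d[os.path.dirname(f)] = True
        return d

    m1 = ["lib/a.py", "lib/b.py"]               # local: renamed src/ -> lib/
    m2 = ["src/a.py", "src/b.py", "src/new.py"] # remote: added src/new.py
    fullcopy = {"lib/a.py": "src/a.py",         # dst -> src, per checkcopies
                "lib/b.py": "src/b.py"}

    d1, d2, invalid, dirmove = dirs(m1), dirs(m2), {}, {}
    for dst, src in fullcopy.items():
        dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
        if dsrc in invalid:
            continue
        elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
            invalid[dsrc] = True           # source dir survives on one side
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            invalid[dsrc] = True           # moved to two different places
            del dirmove[dsrc]
        else:
            dirmove[dsrc] = ddst

    print(dirmove)  # {'src': 'lib'}

The final loop in findcopies then maps any unaccounted file under a moved directory, here src/new.py, to dirmove[d] + "/" + basename, i.e. lib/new.py.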
def manifestmerge(repo, p1, p2, pa, overwrite, partial):
    """
    Merge p1 and p2 with ancestor ma and generate merge action list

    overwrite = whether we clobber working files
    partial = function to filter file lists
    """

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
    repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))

    m1 = p1.manifest()
    m2 = p2.manifest()
    ma = pa.manifest()
    backwards = (pa == p2)
    action = []
    copy = {}

    def fmerge(f, f2=None, fa=None):
        """merge executable flags"""
        if not f2:
            f2 = f
            fa = f
        a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
        return ((a^b) | (a^c)) ^ a

    def act(msg, m, f, *args):
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        action.append((f, m) + args)

    if not (backwards or overwrite):
        copy = findcopies(repo, m1, m2, ma, pa.rev())
    copied = dict.fromkeys(copy.values())

    # Compare manifests
    for f, n in m1.iteritems():
        if partial and not partial(f):
            continue
        if f in m2:
            # are files different?
            if n != m2[f]:
                a = ma.get(f, nullid)
                # are both different from the ancestor?
                if not overwrite and n != a and m2[f] != a:
                    act("versions differ", "m", f, f, f, fmerge(f), False)
                # are we clobbering?
                # is remote's version newer?
                # or are we going back in time and clean?
                elif overwrite or m2[f] != a or (backwards and not n[20:]):
                    act("remote is newer", "g", f, m2.execf(f))
                # local is newer, not overwrite, check mode bits
                elif fmerge(f) != m1.execf(f):
                    act("update permissions", "e", f, m2.execf(f))
            # contents same, check mode bits
            elif m1.execf(f) != m2.execf(f):
                if overwrite or fmerge(f) != m1.execf(f):
                    act("update permissions", "e", f, m2.execf(f))
        elif f in copied:
            continue
        elif f in copy:
            f2 = copy[f]
-            if f2 in m1: # case 2 A,B/B/B
+            if f2 not in m2: # directory rename
+                act("remote renamed directory to " + f2, "d",
+                    f, None, f2, m1.execf(f))
+            elif f2 in m1: # case 2 A,B/B/B
                act("local copied to " + f2, "m",
                    f, f2, f, fmerge(f, f2, f2), False)
            else: # case 4,21 A/B/B
                act("local moved to " + f2, "m",
                    f, f2, f, fmerge(f, f2, f2), False)
        elif f in ma:
            if n != ma[f] and not overwrite:
                if repo.ui.prompt(
                    (_(" local changed %s which remote deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
                    act("prompt delete", "r", f)
            else:
                act("other deleted", "r", f)
        else:
            # file is created on branch or in working directory
            if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
                act("remote deleted", "r", f)

    for f, n in m2.iteritems():
        if partial and not partial(f):
            continue
        if f in m1:
            continue
        if f in copied:
            continue
        if f in copy:
            f2 = copy[f]
-            if f2 in m2: # rename case 1, A/A,B/A
+            if f2 not in m1: # directory rename
+                act("local renamed directory to " + f2, "d",
+                    None, f, f2, m2.execf(f))
+            elif f2 in m2: # rename case 1, A/A,B/A
                act("remote copied to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), False)
            else: # case 3,20 A/B/A
                act("remote moved to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), True)
        elif f in ma:
            if overwrite or backwards:
                act("recreating", "g", f, m2.execf(f))
            elif n != ma[f]:
                if repo.ui.prompt(
                    (_("remote changed %s which local deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
                    act("prompt recreating", "g", f, m2.execf(f))
        else:
            act("remote created", "g", f, m2.execf(f))

    return action

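The exec-flag rule in fmerge, ((a^b) | (a^c)) ^ a, is worth a worked example: with a the ancestor's exec bit and b, c the two sides' bits, the result is the ancestor's bit unless a side changed it, in which case the change wins (if both sides changed it, they necessarily agree). Enumerated as a sketch:

    # Truth table for fmerge's flag merge; illustration only.
    def fmerge_flag(a, b, c):
        return ((a ^ b) | (a ^ c)) ^ a

    for a in (0, 1):
        for b in (0, 1):
            for c in (0, 1):
                print((a, b, c, fmerge_flag(a, b, c)))
    # (0, 1, 0) -> 1: local set the bit, keep it set
    # (1, 1, 0) -> 0: remote cleared it, the change wins
    # (0, 1, 1) -> 1: both set it, result is set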
def applyupdates(repo, action, wctx, mctx):
    "apply the merge action list to the working directory"

    updated, merged, removed, unresolved = 0, 0, 0, 0
    action.sort()
    for a in action:
        f, m = a[:2]
-        if f[0] == "/":
+        if f and f[0] == "/":
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(repo.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            r = filemerge(repo, f, f2, wctx, mctx)
            if r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
                if f != fd:
                    repo.ui.debug(_("copying %s to %s\n") % (f, fd))
                    repo.wwrite(fd, repo.wread(f))
                    if move:
                        repo.ui.debug(_("removing %s\n") % f)
                        os.unlink(repo.wjoin(f))
                util.set_exec(repo.wjoin(fd), flag)
        elif m == "g": # get
            flag = a[2]
            repo.ui.note(_("getting %s\n") % f)
            t = mctx.filectx(f).data()
            repo.wwrite(f, t)
            util.set_exec(repo.wjoin(f), flag)
            updated += 1
+        elif m == "d": # directory rename
+            f2, fd, flag = a[2:]
+            if f:
+                repo.ui.note(_("moving %s to %s\n") % (f, fd))
+                t = wctx.filectx(f).data()
+                repo.wwrite(fd, t)
+                util.set_exec(repo.wjoin(fd), flag)
+                util.unlink(repo.wjoin(f))
+            if f2:
+                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
+                t = mctx.filectx(f2).data()
+                repo.wwrite(fd, t)
+                util.set_exec(repo.wjoin(fd), flag)
+            updated += 1
        elif m == "e": # exec
            flag = a[2]
            util.set_exec(repo.wjoin(f), flag)

    return updated, merged, removed, unresolved

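For reference, the shape of the new "d" action tuples consumed by the branch above, as built by act() in manifestmerge ((f, m) + args); the paths here are hypothetical:

    # (f, "d", f2, fd, flag) -- applyupdates reads f2, fd, flag = a[2:]
    remote_renamed = ("src/new.py", "d", None, "lib/new.py", False)
    # f set, f2 None: move the local file src/new.py to lib/new.py
    local_renamed = (None, "d", "src/other.py", "lib/other.py", False)
    # f None, f2 set: fetch remote src/other.py straight into lib/other.py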
def recordupdates(repo, action, branchmerge):
    "record merge actions to the dirstate"

    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.update([f], 'r')
            else:
                repo.dirstate.forget([f])
        elif m == "f": # forget
            repo.dirstate.forget([f])
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.update([f], 'n', st_mtime=-1)
            else:
                repo.dirstate.update([f], 'n')
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.update([fd], 'm')
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.update([f], 'r')
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                repo.dirstate.update([fd], 'n', st_size=-1, st_mtime=-1)
                if move:
                    repo.dirstate.forget([f])
+        elif m == "d": # directory rename
+            f2, fd, flag = a[2:]
+            if branchmerge:
+                repo.dirstate.update([fd], 'a')
+                if f:
+                    repo.dirstate.update([f], 'r')
+                    repo.dirstate.copy(f, fd)
+                if f2:
+                    repo.dirstate.copy(f2, fd)
+            else:
+                repo.dirstate.update([fd], 'n')
+                if f:
+                    repo.dirstate.forget([f])

def update(repo, node, branchmerge, force, partial, wlock):
    """
    Perform a merge between the working directory and the given node

    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    wlock = working dir lock, if already held
    """

    if not wlock:
        wlock = repo.wlock()

    overwrite = force and not branchmerge
    forcemerge = force and branchmerge
    wc = repo.workingctx()
    pl = wc.parents()
    p1, p2 = pl[0], repo.changectx(node)
    pa = p1.ancestor(p2)
    fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

    ### check phase
    if not overwrite and len(pl) > 1:
        raise util.Abort(_("outstanding uncommitted merges"))
    if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
        if branchmerge:
            raise util.Abort(_("there is nothing to merge, just use "
                               "'hg update' or look at 'hg heads'"))
    elif not (overwrite or branchmerge):
        raise util.Abort(_("update spans branches, use 'hg merge' "
                           "or 'hg update -C' to lose changes"))
    if branchmerge and not forcemerge:
        if wc.files():
            raise util.Abort(_("outstanding uncommitted changes"))

    ### calculate phase
    action = []
    if not force:
        checkunknown(wc, p2)
    if not branchmerge:
        action += forgetremoved(wc, p2)
    action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

    ### apply phase
    if not branchmerge: # just jump to the new rev
        fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
    if not partial:
        repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

    stats = applyupdates(repo, action, wc, p2)

    if not partial:
        recordupdates(repo, action, branchmerge)
        repo.dirstate.setparents(fp1, fp2)
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
        if not branchmerge:
            repo.opener("branch", "w").write(p2.branch() + "\n")

    return stats

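In sum: manifestmerge emits the new "d" actions, applyupdates moves or fetches the affected files into the renamed directory, and recordupdates registers each result as an add plus a copy record ('a' and dirstate.copy) on a branch merge, so the rename survives commit. Since act() logs every decision as " file: message -> code", a run with --debug surfaces a directory rename as, for example (hypothetical path):

    src/new.py: remote renamed directory to lib/new.py -> d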