##// END OF EJS Templates
hooks: separate hook code into a separate module
Matt Mackall -
r4622:fff50306 default
parent child Browse files
Show More
@@ -0,0 +1,96
1 # hook.py - hook support for mercurial
2 #
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7
8 from i18n import _
9 import util
10
def _pythonhook(ui, repo, name, hname, funcname, args, throw):
    '''call python hook. hook is callable object, looked up as
    name in python module. if callable returns "true", hook
    fails, else passes. if hook raises exception, treated as
    hook failure. exception propagates if throw is "true".

    reason for "true" meaning "hook failed" is so that
    unmodified commands (e.g. mercurial.commands.update) can
    be run as hooks without wrappers to convert return values.'''

    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
    obj = funcname
    if not callable(obj):
        # funcname is a dotted path "module.func": import the module,
        # then walk the remaining attribute path to reach the callable.
        d = funcname.rfind('.')
        if d == -1:
            raise util.Abort(_('%s hook is invalid ("%s" not in '
                               'a module)') % (hname, funcname))
        modname = funcname[:d]
        try:
            obj = __import__(modname)
        except ImportError:
            try:
                # extensions are loaded with hgext_ prefix
                obj = __import__("hgext_%s" % modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
        try:
            # __import__ returns the top-level package, so traverse
            # every dotted component after the first.
            for p in funcname.split('.')[1:]:
                obj = getattr(obj, p)
        except AttributeError, err:
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not defined)') %
                             (hname, funcname))
        if not callable(obj):
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not callable)') %
                             (hname, funcname))
    try:
        r = obj(ui=ui, repo=repo, hooktype=name, **args)
    except (KeyboardInterrupt, util.SignalInterrupt):
        # user interrupts always propagate, regardless of throw
        raise
    except Exception, exc:
        if isinstance(exc, util.Abort):
            ui.warn(_('error: %s hook failed: %s\n') %
                    (hname, exc.args[0]))
        else:
            ui.warn(_('error: %s hook raised an exception: '
                      '%s\n') % (hname, exc))
        if throw:
            raise
        # not throwing: report the traceback and treat as failure
        ui.print_exc()
        return True
    if r:
        if throw:
            raise util.Abort(_('%s hook failed') % hname)
        ui.warn(_('warning: %s hook failed\n') % hname)
    return r
def _exthook(ui, repo, name, cmd, args, throw):
    '''run an external (shell command) hook.

    hook arguments are exported to the child as HG_<KEY> environment
    variables.  returns the command's (non-zero) exit status, raising
    util.Abort instead when throw is true.'''
    ui.note(_("running hook %s: %s\n") % (name, cmd))
    env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
    r = util.system(cmd, environ=env, cwd=repo.root)
    if r:
        desc, r = util.explain_exit(r)
        if throw:
            raise util.Abort(_('%s hook %s') % (name, desc))
        ui.warn(_('warning: %s hook %s\n') % (name, desc))
    return r
def hook(ui, repo, name, throw=False, **args):
    '''run every configured hook whose base name matches `name`.

    hooks are taken from the [hooks] config section, run in sorted
    order; callable values and "python:" prefixed strings run as
    in-process python hooks, anything else as a shell command.
    returns true if any hook reported failure.'''
    result = False
    matching = []
    for hname, cmd in ui.configitems("hooks"):
        if cmd and hname.split(".", 1)[0] == name:
            matching.append((hname, cmd))
    matching.sort()
    for hname, cmd in matching:
        if callable(cmd):
            failed = _pythonhook(ui, repo, name, hname, cmd, args, throw)
        elif cmd.startswith('python:'):
            failed = _pythonhook(ui, repo, name, hname, cmd[7:].strip(),
                                 args, throw)
        else:
            failed = _exthook(ui, repo, hname, cmd, args, throw)
        result = failed or result
    return result
@@ -1,1965 +1,1883
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util, extensions
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
    def __del__(self):
        # drop the reference to any pending transaction on teardown
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        '''open (or, if create is true, initialize) the repository at path.

        raises repo.RepoError if the repository is missing, already
        exists when create is requested, or declares an unsupported
        requirement.'''
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # a missing requires file means an old-style repo
                requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlog files live
        # under .hg/store with encoded filenames
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # lazily-populated caches (see tags()/branchtags()/_filter())
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None
88
88
    def __getattr__(self, name):
        # lazily construct the expensive changelog/manifest/dirstate
        # objects on first access, then cache them as real attributes
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
103
103
    def url(self):
        # local repositories are addressed with a file: URL
        return 'file:' + self.root
106
106
    def hook(self, name, throw=False, **args):
        # hook running logic moved to the hook module; delegate to it
        return hook.hook(self.ui, self, name, throw, **args)
    # characters that may not appear in a tag name (see _tag)
    tag_disallowed = ':\r\n'
    def _tag(self, name, node, message, local, user, date, parent=None):
        '''record tag `name` for `node`.

        local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed.  runs the pretag (throwing) and tag
        hooks.  returns the tag commit's node for global tags.'''
        # with no explicit parent, we tag against the working directory
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            # base the new .hgtags on the given parent's version
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        # '?' state presumably means .hgtags is untracked — TODO confirm
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
224
142
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag if .hgtags is modified/added/removed/deleted/unknown
        # in the working directory (first five status lists)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
250
168
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        # tag name -> (node, list of superceded nodes)
        globaltags = {}

        def readtags(lines, fn):
            '''parse "<hexnode> <tag>" lines from fn and merge the
            result into globaltags, warning on malformed entries.'''
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # later entries for the same tag supercede earlier ones;
                # keep the chain of old nodes in h
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if bn != an and an in bh and \
                   (bn not in ah or len(bh) > len(ah)):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        # flatten into tag -> node, dropping tags deleted via nullid
        self.tagscache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
329
247
    def _hgtagsnodes(self):
        '''return (rev, node, .hgtags-filenode) for each head that has
        a .hgtags file, oldest first, deduplicated by filenode so each
        distinct .hgtags version is read only once.'''
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags version already queued; drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
347
265
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # build the reverse (node -> tags) index once, lazily
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])
367
285
    def _branchtags(self):
        '''read the on-disk branch cache, extend it to the current tip
        if it is stale, and write the refreshed cache back.'''
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        '''return a mapping of branch name to the branch's tip node'''
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
390
308
    def _readbranchcache(self):
        '''read .hg/branch.cache; return (branch->node map, tip node,
        tip rev).  any problem yields an empty/invalid cache instead
        of an error.'''
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache yet (or unreadable): start from scratch
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<node hex> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any parse/validation failure: discard the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
418
336
    def _writebranchcache(self, branches, tip, tiprev):
        '''write the branch cache atomically; failures are ignored
        since the cache can always be rebuilt.'''
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # scan revisions [start, end); the last-seen node for each
        # branch becomes that branch's tip
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()
434
352
    def lookup(self, key):
        '''resolve a revision identifier to a node.

        tries, in order: the special keys '.' (working dir parent) and
        'null', exact changelog match, tags, branch names, then a
        unique prefix match.  raises repo.RepoError if nothing matches.'''
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
456
374
    def dev(self):
        # device number of the .hg directory
        return os.lstat(self.path).st_dev

    def local(self):
        # this is a local (not remote) repository
        return True

    def join(self, f):
        # path under .hg/
        return os.path.join(self.path, f)

    def sjoin(self, f):
        # path under the store, with filename encoding applied
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        # path under the working directory
        return os.path.join(self.root, f)
472
390
    def file(self, f):
        '''return the filelog for tracked file f (leading "/" stripped).'''
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # omit the second parent when it is null
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
501
419
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open a file in the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
513
431
    def _filter(self, filter, filename, data):
        '''run data through the first matching [encode]/[decode]
        filter command configured for filename; patterns are compiled
        once per filter section and cached in self.filterpats.'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter applies
                break

        return data
529
447
    def wread(self, filename):
        '''read a working-directory file (symlink target for links),
        passed through the "encode" filters.'''
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
536
454
    def wwrite(self, filename, data, flags):
        '''write data to a working-directory file after "decode"
        filtering; "l" in flags creates a symlink with data as its
        target, "x" sets the executable bit.'''
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            try:
                # replace any existing file with the symlink
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # if the file used to be a symlink, remove it first
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
557
475
def wwritedata(self, filename, data):
    """Return data passed through the 'decode' filters for filename,
    without writing anything to disk."""
    decoded = self._filter("decode", filename, data)
    return decoded
560
478
def transaction(self):
    """Return a transaction handle, nesting inside one already running.

    When a fresh transaction is started, the current dirstate is saved
    to journal.dirstate so rollback() can later restore it; on close,
    the journal files are renamed to undo files (via aftertrans).
    """
    tr = self.transhandle
    # PEP 8: identity comparison with None, not `!= None`
    if tr is not None and tr.running():
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate file yet (e.g. brand-new repository)
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames))
    self.transhandle = tr
    return tr
580
498
def recover(self):
    """Roll back an interrupted transaction, if any.

    Returns True when a journal was found and rolled back, False
    otherwise.
    """
    l = self.lock()  # hold the store lock for the duration
    journal = self.sjoin("journal")
    if not os.path.exists(journal):
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
    self.ui.status(_("rolling back interrupted transaction\n"))
    transaction.rollback(self.sopener, journal)
    self.invalidate()
    return True
591
509
def rollback(self, wlock=None, lock=None):
    """Undo the last committed transaction, restoring the dirstate
    from the saved undo files."""
    # acquire wlock before lock, matching the rest of the code
    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    undo = self.sjoin("undo")
    if not os.path.exists(undo):
        self.ui.warn(_("no rollback information available\n"))
        return
    self.ui.status(_("rolling back last transaction\n"))
    transaction.rollback(self.sopener, undo)
    util.rename(self.join("undo.dirstate"), self.join("dirstate"))
    self.invalidate()
    self.dirstate.invalidate()
605
523
def invalidate(self):
    """Drop cached changelog/manifest objects and the tag caches so
    they are re-read from disk on next access."""
    for a in "changelog manifest".split():
        if hasattr(self, a):
            # use the delattr builtin rather than calling the
            # __delattr__ special method directly
            delattr(self, a)
    self.tagscache = None
    self.nodetagscache = None
612
530
613 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
531 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
614 desc=None):
532 desc=None):
615 try:
533 try:
616 l = lock.lock(lockname, 0, releasefn, desc=desc)
534 l = lock.lock(lockname, 0, releasefn, desc=desc)
617 except lock.LockHeld, inst:
535 except lock.LockHeld, inst:
618 if not wait:
536 if not wait:
619 raise
537 raise
620 self.ui.warn(_("waiting for lock on %s held by %r\n") %
538 self.ui.warn(_("waiting for lock on %s held by %r\n") %
621 (desc, inst.locker))
539 (desc, inst.locker))
622 # default to 600 seconds timeout
540 # default to 600 seconds timeout
623 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
541 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
624 releasefn, desc=desc)
542 releasefn, desc=desc)
625 if acquirefn:
543 if acquirefn:
626 acquirefn()
544 acquirefn()
627 return l
545 return l
628
546
def lock(self, wait=1):
    """Acquire the store lock; invalidates caches on acquisition."""
    desc = _('repository %s') % self.origroot
    return self.do_lock(self.sjoin("lock"), wait,
                        acquirefn=self.invalidate, desc=desc)
633
551
def wlock(self, wait=1):
    """Acquire the working-directory lock; writes the dirstate on
    release and invalidates it on acquisition."""
    desc = _('working directory of %s') % self.origroot
    return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, desc=desc)
638
556
def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
    """
    commit an individual file as part of a larger transaction

    fn         -- working-directory file to commit
    manifest1  -- manifest of the first parent
    manifest2  -- manifest of the second parent ({} when not a merge)
    linkrev    -- changelog revision this filelog entry will link to
    changelist -- list of changed files; fn is appended when a new
                  filelog entry is actually created
    Returns the filelog node for fn (an existing node when the file is
    unchanged relative to its parent).
    """

    t = self.wread(fn)
    fl = self.file(fn)
    # filelog parents, nullid when the file is absent from a parent
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = self.dirstate.copied(fn)
    if cp:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        elif fp1 != nullid: # copied on local side, reversed
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = fp1
        else: # directory rename
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
        # copy data stands in for the first parent (see comment above)
        fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t):
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, transaction, linkrev, fp1, fp2)
698
616
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
    """Commit with explicitly supplied parents; falls back to the
    dirstate parents when p1 is not given."""
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    kwargs = dict(files=files, text=text, user=user, date=date,
                  p1=p1, p2=p2, wlock=wlock, extra=extra)
    return self.commit(**kwargs)
704
622
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False, p1=None, p2=None, extra={}):
    """Create a new changeset.

    files  -- explicit list of files; when empty and p1 is None the
              dirstate status (filtered by match) determines the set
    p1, p2 -- explicit parents (the rawcommit path); when p1 is None
              the dirstate parents are used
    Returns the new changeset node, or None when nothing changed or
    the commit message came back empty from the editor.
    """

    commit = []
    remove = []
    changed = []
    use_dirstate = (p1 is None) # not rawcommit
    # copy so caller's dict (and the mutable default) is never mutated
    extra = extra.copy()

    if use_dirstate:
        if files:
            # classify explicitly named files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            # no explicit files: commit everything status reports
            changes = self.status(match=match)[:5]
            modified, added, removed, deleted, unknown = changes
            commit = modified + added
            remove = removed
    else:
        commit = files

    if use_dirstate:
        p1, p2 = self.dirstate.parents()
        update_dirstate = True
    else:
        p1, p2 = p1, p2 or nullid
        # only move dirstate parents if they already point at p1
        update_dirstate = (self.dirstate.parents()[0] == p1)

    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    if use_dirstate:
        branchname = self.workingctx().branch()
        try:
            # round-trip to validate the branch name is UTF-8
            branchname = branchname.decode('UTF-8').encode('UTF-8')
        except UnicodeDecodeError:
            raise util.Abort(_('branch name not in UTF-8!'))
    else:
        branchname = ""

    if use_dirstate:
        oldname = c1[5].get("branch") # stored in UTF-8
        # bail out early when there is literally nothing to record
        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

    xp1 = hex(p1)
    if p2 == nullid: xp2 = ''
    else: xp2 = hex(p2)

    # hooks may veto the commit by raising (throw=True)
    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    is_exec = util.execfunc(self.root, m1.execf)
    is_link = util.linkfunc(self.root, m1.linkf)
    for f in commit:
        self.ui.note(f + "\n")
        try:
            new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
            new_exec = is_exec(f)
            new_link = is_link(f)
            if not changed or changed[-1] != f:
                # mention the file in the changelog if some flag changed,
                # even if there was no content change.
                old_exec = m1.execf(f)
                old_link = m1.linkf(f)
                if old_exec != new_exec or old_link != new_link:
                    changed.append(f)
            m1.set(f, new_exec, new_link)
        except (OSError, IOError):
            if use_dirstate:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            else:
                # rawcommit: treat an unreadable file as removed
                remove.append(f)

    # update manifest
    m1.update(new)
    remove.sort()
    removed = []

    for f in remove:
        if f in m1:
            del m1[f]
            removed.append(f)
        elif f in m2:
            # only in the second parent: still record the removal
            removed.append(f)
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

    # add changeset
    new = new.keys()
    new.sort()

    user = user or self.ui.username()
    if not text or force_editor:
        # build the editor template shown to the user
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        edittext.append("HG: user: %s" % user)
        if p2 != nullid:
            edittext.append("HG: branch merge")
        if branchname:
            edittext.append("HG: branch %s" % util.tolocal(branchname))
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in removed])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # normalize the message: strip trailing whitespace and leading
    # blank lines; an empty result aborts the commit
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    if branchname:
        extra["branch"] = branchname
    n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                           user, date, extra)
    # pretxncommit hooks may still abort before the transaction closes
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    if self.branchcache and "branch" in extra:
        self.branchcache[util.tolocal(extra["branch"])] = n

    if use_dirstate or update_dirstate:
        self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(removed)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
864
782
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    results are yielded in a tuple (src, filename), where src
    is one of:
    'f' the file was found in the directory tree
    'm' the file was only in the dirstate and not in the tree
    'b' file was not found and matched badmatch

    With node set, walks that changeset's manifest; otherwise
    delegates to dirstate.walk over the working directory.
    '''

    if node:
        fdict = dict.fromkeys(files)
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        mdict = self.manifest.read(self.changelog.read(node)[0])
        mfiles = mdict.keys()
        mfiles.sort()
        for fn in mfiles:
            # tick off every requested name the manifest satisfies;
            # deleting during iteration is safe because we break
            # immediately afterwards
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # whatever is left in fdict was not found in the manifest
        ffiles = fdict.keys()
        ffiles.sort()
        for fn in ffiles:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n')
                             % (self.pathto(fn), short(node)))
    else:
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
906
824
def status(self, node1=None, node2=None, files=[], match=util.always,
           wlock=None, list_ignored=False, list_clean=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns (modified, added, removed, deleted, unknown, ignored,
    clean), each a sorted list of file names.
    """

    def fcmp(fn, getnode):
        # compare working-dir contents of fn against its filelog entry
        t1 = self.wread(fn)
        return self.file(fn).cmp(getnode(fn), t1)

    def mfmatches(node):
        # manifest of node, restricted to files accepted by match
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    mywlock = False

    # are we comparing the working directory?
    if not node2:
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(files, match,
                                                list_ignored, list_clean)

        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                # do a full compare of any files that might have changed
                mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                      nullid)
                for f in lookup:
                    if fcmp(f, getnode):
                        modified.append(f)
                    else:
                        if list_clean:
                            clean.append(f)
                        # opportunistically refresh the dirstate entry,
                        # but only if we can grab the wlock without
                        # waiting
                        if not wlock and not mywlock:
                            mywlock = True
                            try:
                                wlock = self.wlock(wait=0)
                            except lock.LockException:
                                pass
                        if wlock:
                            self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            is_exec = util.execfunc(self.root, mf2.execf)
            is_link = util.linkfunc(self.root, mf2.linkf)
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, is_exec(f), is_link(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]

        if mywlock and wlock:
            wlock.release()
    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        mf2keys = mf2.keys()
        mf2keys.sort()
        getnode = lambda fn: mf1.get(fn, nullid)
        for fn in mf2keys:
            # `in` instead of the deprecated dict.has_key()
            if fn in mf1:
                if mf1.flags(fn) != mf2.flags(fn) or \
                   (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                            fcmp(fn, getnode))):
                    modified.append(fn)
                elif list_clean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # anything left in mf1 is absent from mf2, hence removed
        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
1016
934
def add(self, list, wlock=None):
    """Schedule the files in list for addition at the next commit."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        try:
            st = os.lstat(p)
        except OSError:
            # narrowed from a bare except: only a failed lstat means
            # the file is missing; a bare except would also swallow
            # KeyboardInterrupt and programming errors
            self.ui.warn(_("%s does not exist!\n") % f)
            continue
        if st.st_size > 10000000:
            # warn, but still allow the add
            self.ui.warn(_("%s: files over 10MB may cause memory and"
                           " performance problems\n"
                           "(use 'hg revert %s' to unadd the file)\n")
                         % (f, f))
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self.ui.warn(_("%s not added: only files and symlinks "
                           "supported currently\n") % f)
        elif self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
        else:
            self.dirstate.update([f], "a")
1039
957
def forget(self, list, wlock=None):
    """Undo a pending add for each file in list ('a'/'i' state only)."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) in 'ai':
            self.dirstate.forget([f])
        else:
            self.ui.warn(_("%s not added!\n") % f)
1048
966
1049 def remove(self, list, unlink=False, wlock=None):
967 def remove(self, list, unlink=False, wlock=None):
1050 if unlink:
968 if unlink:
1051 for f in list:
969 for f in list:
1052 try:
970 try:
1053 util.unlink(self.wjoin(f))
971 util.unlink(self.wjoin(f))
1054 except OSError, inst:
972 except OSError, inst:
1055 if inst.errno != errno.ENOENT:
973 if inst.errno != errno.ENOENT:
1056 raise
974 raise
1057 if not wlock:
975 if not wlock:
1058 wlock = self.wlock()
976 wlock = self.wlock()
1059 for f in list:
977 for f in list:
1060 if unlink and os.path.exists(self.wjoin(f)):
978 if unlink and os.path.exists(self.wjoin(f)):
1061 self.ui.warn(_("%s still exists!\n") % f)
979 self.ui.warn(_("%s still exists!\n") % f)
1062 elif self.dirstate.state(f) == 'a':
980 elif self.dirstate.state(f) == 'a':
1063 self.dirstate.forget([f])
981 self.dirstate.forget([f])
1064 elif f not in self.dirstate:
982 elif f not in self.dirstate:
1065 self.ui.warn(_("%s not tracked!\n") % f)
983 self.ui.warn(_("%s not tracked!\n") % f)
1066 else:
984 else:
1067 self.dirstate.update([f], "r")
985 self.dirstate.update([f], "r")
1068
986
def undelete(self, list, wlock=None):
    """Restore files marked for removal ('r' state) from the first
    dirstate parent and mark them normal again."""
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            # wrapped in _() for i18n, consistent with every other
            # user-facing message in this module
            self.ui.warn(_("%s not removed!\n") % f)
        else:
            t = self.file(f).read(m[f])
            self.wwrite(f, t, m.flags(f))
            self.dirstate.update([f], "n")
1082
1000
1083 def copy(self, source, dest, wlock=None):
1001 def copy(self, source, dest, wlock=None):
1084 p = self.wjoin(dest)
1002 p = self.wjoin(dest)
1085 if not (os.path.exists(p) or os.path.islink(p)):
1003 if not (os.path.exists(p) or os.path.islink(p)):
1086 self.ui.warn(_("%s does not exist!\n") % dest)
1004 self.ui.warn(_("%s does not exist!\n") % dest)
1087 elif not (os.path.isfile(p) or os.path.islink(p)):
1005 elif not (os.path.isfile(p) or os.path.islink(p)):
1088 self.ui.warn(_("copy failed: %s is not a file or a "
1006 self.ui.warn(_("copy failed: %s is not a file or a "
1089 "symbolic link\n") % dest)
1007 "symbolic link\n") % dest)
1090 else:
1008 else:
1091 if not wlock:
1009 if not wlock:
1092 wlock = self.wlock()
1010 wlock = self.wlock()
1093 if self.dirstate.state(dest) == '?':
1011 if self.dirstate.state(dest) == '?':
1094 self.dirstate.update([dest], "a")
1012 self.dirstate.update([dest], "a")
1095 self.dirstate.copy(source, dest)
1013 self.dirstate.copy(source, dest)
1096
1014
1097 def heads(self, start=None):
1015 def heads(self, start=None):
1098 heads = self.changelog.heads(start)
1016 heads = self.changelog.heads(start)
1099 # sort the output in rev descending order
1017 # sort the output in rev descending order
1100 heads = [(-self.changelog.rev(h), h) for h in heads]
1018 heads = [(-self.changelog.rev(h), h) for h in heads]
1101 heads.sort()
1019 heads.sort()
1102 return [n for (r, n) in heads]
1020 return [n for (r, n) in heads]
1103
1021
1104 def branches(self, nodes):
1022 def branches(self, nodes):
1105 if not nodes:
1023 if not nodes:
1106 nodes = [self.changelog.tip()]
1024 nodes = [self.changelog.tip()]
1107 b = []
1025 b = []
1108 for n in nodes:
1026 for n in nodes:
1109 t = n
1027 t = n
1110 while 1:
1028 while 1:
1111 p = self.changelog.parents(n)
1029 p = self.changelog.parents(n)
1112 if p[1] != nullid or p[0] == nullid:
1030 if p[1] != nullid or p[0] == nullid:
1113 b.append((t, n, p[0], p[1]))
1031 b.append((t, n, p[0], p[1]))
1114 break
1032 break
1115 n = p[0]
1033 n = p[0]
1116 return b
1034 return b
1117
1035
1118 def between(self, pairs):
1036 def between(self, pairs):
1119 r = []
1037 r = []
1120
1038
1121 for top, bottom in pairs:
1039 for top, bottom in pairs:
1122 n, l, i = top, [], 0
1040 n, l, i = top, [], 0
1123 f = 1
1041 f = 1
1124
1042
1125 while n != bottom:
1043 while n != bottom:
1126 p = self.changelog.parents(n)[0]
1044 p = self.changelog.parents(n)[0]
1127 if i == f:
1045 if i == f:
1128 l.append(n)
1046 l.append(n)
1129 f = f * 2
1047 f = f * 2
1130 n = p
1048 n = p
1131 i += 1
1049 i += 1
1132
1050
1133 r.append(l)
1051 r.append(l)
1134
1052
1135 return r
1053 return r
1136
1054
1137 def findincoming(self, remote, base=None, heads=None, force=False):
1055 def findincoming(self, remote, base=None, heads=None, force=False):
1138 """Return list of roots of the subsets of missing nodes from remote
1056 """Return list of roots of the subsets of missing nodes from remote
1139
1057
1140 If base dict is specified, assume that these nodes and their parents
1058 If base dict is specified, assume that these nodes and their parents
1141 exist on the remote side and that no child of a node of base exists
1059 exist on the remote side and that no child of a node of base exists
1142 in both remote and self.
1060 in both remote and self.
1143 Furthermore base will be updated to include the nodes that exists
1061 Furthermore base will be updated to include the nodes that exists
1144 in self and remote but no children exists in self and remote.
1062 in self and remote but no children exists in self and remote.
1145 If a list of heads is specified, return only nodes which are heads
1063 If a list of heads is specified, return only nodes which are heads
1146 or ancestors of these heads.
1064 or ancestors of these heads.
1147
1065
1148 All the ancestors of base are in self and in remote.
1066 All the ancestors of base are in self and in remote.
1149 All the descendants of the list returned are missing in self.
1067 All the descendants of the list returned are missing in self.
1150 (and so we know that the rest of the nodes are missing in remote, see
1068 (and so we know that the rest of the nodes are missing in remote, see
1151 outgoing)
1069 outgoing)
1152 """
1070 """
1153 m = self.changelog.nodemap
1071 m = self.changelog.nodemap
1154 search = []
1072 search = []
1155 fetch = {}
1073 fetch = {}
1156 seen = {}
1074 seen = {}
1157 seenbranch = {}
1075 seenbranch = {}
1158 if base == None:
1076 if base == None:
1159 base = {}
1077 base = {}
1160
1078
1161 if not heads:
1079 if not heads:
1162 heads = remote.heads()
1080 heads = remote.heads()
1163
1081
1164 if self.changelog.tip() == nullid:
1082 if self.changelog.tip() == nullid:
1165 base[nullid] = 1
1083 base[nullid] = 1
1166 if heads != [nullid]:
1084 if heads != [nullid]:
1167 return [nullid]
1085 return [nullid]
1168 return []
1086 return []
1169
1087
1170 # assume we're closer to the tip than the root
1088 # assume we're closer to the tip than the root
1171 # and start by examining the heads
1089 # and start by examining the heads
1172 self.ui.status(_("searching for changes\n"))
1090 self.ui.status(_("searching for changes\n"))
1173
1091
1174 unknown = []
1092 unknown = []
1175 for h in heads:
1093 for h in heads:
1176 if h not in m:
1094 if h not in m:
1177 unknown.append(h)
1095 unknown.append(h)
1178 else:
1096 else:
1179 base[h] = 1
1097 base[h] = 1
1180
1098
1181 if not unknown:
1099 if not unknown:
1182 return []
1100 return []
1183
1101
1184 req = dict.fromkeys(unknown)
1102 req = dict.fromkeys(unknown)
1185 reqcnt = 0
1103 reqcnt = 0
1186
1104
1187 # search through remote branches
1105 # search through remote branches
1188 # a 'branch' here is a linear segment of history, with four parts:
1106 # a 'branch' here is a linear segment of history, with four parts:
1189 # head, root, first parent, second parent
1107 # head, root, first parent, second parent
1190 # (a branch always has two parents (or none) by definition)
1108 # (a branch always has two parents (or none) by definition)
1191 unknown = remote.branches(unknown)
1109 unknown = remote.branches(unknown)
1192 while unknown:
1110 while unknown:
1193 r = []
1111 r = []
1194 while unknown:
1112 while unknown:
1195 n = unknown.pop(0)
1113 n = unknown.pop(0)
1196 if n[0] in seen:
1114 if n[0] in seen:
1197 continue
1115 continue
1198
1116
1199 self.ui.debug(_("examining %s:%s\n")
1117 self.ui.debug(_("examining %s:%s\n")
1200 % (short(n[0]), short(n[1])))
1118 % (short(n[0]), short(n[1])))
1201 if n[0] == nullid: # found the end of the branch
1119 if n[0] == nullid: # found the end of the branch
1202 pass
1120 pass
1203 elif n in seenbranch:
1121 elif n in seenbranch:
1204 self.ui.debug(_("branch already found\n"))
1122 self.ui.debug(_("branch already found\n"))
1205 continue
1123 continue
1206 elif n[1] and n[1] in m: # do we know the base?
1124 elif n[1] and n[1] in m: # do we know the base?
1207 self.ui.debug(_("found incomplete branch %s:%s\n")
1125 self.ui.debug(_("found incomplete branch %s:%s\n")
1208 % (short(n[0]), short(n[1])))
1126 % (short(n[0]), short(n[1])))
1209 search.append(n) # schedule branch range for scanning
1127 search.append(n) # schedule branch range for scanning
1210 seenbranch[n] = 1
1128 seenbranch[n] = 1
1211 else:
1129 else:
1212 if n[1] not in seen and n[1] not in fetch:
1130 if n[1] not in seen and n[1] not in fetch:
1213 if n[2] in m and n[3] in m:
1131 if n[2] in m and n[3] in m:
1214 self.ui.debug(_("found new changeset %s\n") %
1132 self.ui.debug(_("found new changeset %s\n") %
1215 short(n[1]))
1133 short(n[1]))
1216 fetch[n[1]] = 1 # earliest unknown
1134 fetch[n[1]] = 1 # earliest unknown
1217 for p in n[2:4]:
1135 for p in n[2:4]:
1218 if p in m:
1136 if p in m:
1219 base[p] = 1 # latest known
1137 base[p] = 1 # latest known
1220
1138
1221 for p in n[2:4]:
1139 for p in n[2:4]:
1222 if p not in req and p not in m:
1140 if p not in req and p not in m:
1223 r.append(p)
1141 r.append(p)
1224 req[p] = 1
1142 req[p] = 1
1225 seen[n[0]] = 1
1143 seen[n[0]] = 1
1226
1144
1227 if r:
1145 if r:
1228 reqcnt += 1
1146 reqcnt += 1
1229 self.ui.debug(_("request %d: %s\n") %
1147 self.ui.debug(_("request %d: %s\n") %
1230 (reqcnt, " ".join(map(short, r))))
1148 (reqcnt, " ".join(map(short, r))))
1231 for p in xrange(0, len(r), 10):
1149 for p in xrange(0, len(r), 10):
1232 for b in remote.branches(r[p:p+10]):
1150 for b in remote.branches(r[p:p+10]):
1233 self.ui.debug(_("received %s:%s\n") %
1151 self.ui.debug(_("received %s:%s\n") %
1234 (short(b[0]), short(b[1])))
1152 (short(b[0]), short(b[1])))
1235 unknown.append(b)
1153 unknown.append(b)
1236
1154
1237 # do binary search on the branches we found
1155 # do binary search on the branches we found
1238 while search:
1156 while search:
1239 n = search.pop(0)
1157 n = search.pop(0)
1240 reqcnt += 1
1158 reqcnt += 1
1241 l = remote.between([(n[0], n[1])])[0]
1159 l = remote.between([(n[0], n[1])])[0]
1242 l.append(n[1])
1160 l.append(n[1])
1243 p = n[0]
1161 p = n[0]
1244 f = 1
1162 f = 1
1245 for i in l:
1163 for i in l:
1246 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1164 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1247 if i in m:
1165 if i in m:
1248 if f <= 2:
1166 if f <= 2:
1249 self.ui.debug(_("found new branch changeset %s\n") %
1167 self.ui.debug(_("found new branch changeset %s\n") %
1250 short(p))
1168 short(p))
1251 fetch[p] = 1
1169 fetch[p] = 1
1252 base[i] = 1
1170 base[i] = 1
1253 else:
1171 else:
1254 self.ui.debug(_("narrowed branch search to %s:%s\n")
1172 self.ui.debug(_("narrowed branch search to %s:%s\n")
1255 % (short(p), short(i)))
1173 % (short(p), short(i)))
1256 search.append((p, i))
1174 search.append((p, i))
1257 break
1175 break
1258 p, f = i, f * 2
1176 p, f = i, f * 2
1259
1177
1260 # sanity check our fetch list
1178 # sanity check our fetch list
1261 for f in fetch.keys():
1179 for f in fetch.keys():
1262 if f in m:
1180 if f in m:
1263 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1181 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1264
1182
1265 if base.keys() == [nullid]:
1183 if base.keys() == [nullid]:
1266 if force:
1184 if force:
1267 self.ui.warn(_("warning: repository is unrelated\n"))
1185 self.ui.warn(_("warning: repository is unrelated\n"))
1268 else:
1186 else:
1269 raise util.Abort(_("repository is unrelated"))
1187 raise util.Abort(_("repository is unrelated"))
1270
1188
1271 self.ui.debug(_("found new changesets starting at ") +
1189 self.ui.debug(_("found new changesets starting at ") +
1272 " ".join([short(f) for f in fetch]) + "\n")
1190 " ".join([short(f) for f in fetch]) + "\n")
1273
1191
1274 self.ui.debug(_("%d total queries\n") % reqcnt)
1192 self.ui.debug(_("%d total queries\n") % reqcnt)
1275
1193
1276 return fetch.keys()
1194 return fetch.keys()
1277
1195
1278 def findoutgoing(self, remote, base=None, heads=None, force=False):
1196 def findoutgoing(self, remote, base=None, heads=None, force=False):
1279 """Return list of nodes that are roots of subsets not in remote
1197 """Return list of nodes that are roots of subsets not in remote
1280
1198
1281 If base dict is specified, assume that these nodes and their parents
1199 If base dict is specified, assume that these nodes and their parents
1282 exist on the remote side.
1200 exist on the remote side.
1283 If a list of heads is specified, return only nodes which are heads
1201 If a list of heads is specified, return only nodes which are heads
1284 or ancestors of these heads, and return a second element which
1202 or ancestors of these heads, and return a second element which
1285 contains all remote heads which get new children.
1203 contains all remote heads which get new children.
1286 """
1204 """
1287 if base == None:
1205 if base == None:
1288 base = {}
1206 base = {}
1289 self.findincoming(remote, base, heads, force=force)
1207 self.findincoming(remote, base, heads, force=force)
1290
1208
1291 self.ui.debug(_("common changesets up to ")
1209 self.ui.debug(_("common changesets up to ")
1292 + " ".join(map(short, base.keys())) + "\n")
1210 + " ".join(map(short, base.keys())) + "\n")
1293
1211
1294 remain = dict.fromkeys(self.changelog.nodemap)
1212 remain = dict.fromkeys(self.changelog.nodemap)
1295
1213
1296 # prune everything remote has from the tree
1214 # prune everything remote has from the tree
1297 del remain[nullid]
1215 del remain[nullid]
1298 remove = base.keys()
1216 remove = base.keys()
1299 while remove:
1217 while remove:
1300 n = remove.pop(0)
1218 n = remove.pop(0)
1301 if n in remain:
1219 if n in remain:
1302 del remain[n]
1220 del remain[n]
1303 for p in self.changelog.parents(n):
1221 for p in self.changelog.parents(n):
1304 remove.append(p)
1222 remove.append(p)
1305
1223
1306 # find every node whose parents have been pruned
1224 # find every node whose parents have been pruned
1307 subset = []
1225 subset = []
1308 # find every remote head that will get new children
1226 # find every remote head that will get new children
1309 updated_heads = {}
1227 updated_heads = {}
1310 for n in remain:
1228 for n in remain:
1311 p1, p2 = self.changelog.parents(n)
1229 p1, p2 = self.changelog.parents(n)
1312 if p1 not in remain and p2 not in remain:
1230 if p1 not in remain and p2 not in remain:
1313 subset.append(n)
1231 subset.append(n)
1314 if heads:
1232 if heads:
1315 if p1 in heads:
1233 if p1 in heads:
1316 updated_heads[p1] = True
1234 updated_heads[p1] = True
1317 if p2 in heads:
1235 if p2 in heads:
1318 updated_heads[p2] = True
1236 updated_heads[p2] = True
1319
1237
1320 # this is the set of all roots we have to push
1238 # this is the set of all roots we have to push
1321 if heads:
1239 if heads:
1322 return subset, updated_heads.keys()
1240 return subset, updated_heads.keys()
1323 else:
1241 else:
1324 return subset
1242 return subset
1325
1243
1326 def pull(self, remote, heads=None, force=False, lock=None):
1244 def pull(self, remote, heads=None, force=False, lock=None):
1327 mylock = False
1245 mylock = False
1328 if not lock:
1246 if not lock:
1329 lock = self.lock()
1247 lock = self.lock()
1330 mylock = True
1248 mylock = True
1331
1249
1332 try:
1250 try:
1333 fetch = self.findincoming(remote, force=force)
1251 fetch = self.findincoming(remote, force=force)
1334 if fetch == [nullid]:
1252 if fetch == [nullid]:
1335 self.ui.status(_("requesting all changes\n"))
1253 self.ui.status(_("requesting all changes\n"))
1336
1254
1337 if not fetch:
1255 if not fetch:
1338 self.ui.status(_("no changes found\n"))
1256 self.ui.status(_("no changes found\n"))
1339 return 0
1257 return 0
1340
1258
1341 if heads is None:
1259 if heads is None:
1342 cg = remote.changegroup(fetch, 'pull')
1260 cg = remote.changegroup(fetch, 'pull')
1343 else:
1261 else:
1344 if 'changegroupsubset' not in remote.capabilities:
1262 if 'changegroupsubset' not in remote.capabilities:
1345 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1263 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1346 cg = remote.changegroupsubset(fetch, heads, 'pull')
1264 cg = remote.changegroupsubset(fetch, heads, 'pull')
1347 return self.addchangegroup(cg, 'pull', remote.url())
1265 return self.addchangegroup(cg, 'pull', remote.url())
1348 finally:
1266 finally:
1349 if mylock:
1267 if mylock:
1350 lock.release()
1268 lock.release()
1351
1269
1352 def push(self, remote, force=False, revs=None):
1270 def push(self, remote, force=False, revs=None):
1353 # there are two ways to push to remote repo:
1271 # there are two ways to push to remote repo:
1354 #
1272 #
1355 # addchangegroup assumes local user can lock remote
1273 # addchangegroup assumes local user can lock remote
1356 # repo (local filesystem, old ssh servers).
1274 # repo (local filesystem, old ssh servers).
1357 #
1275 #
1358 # unbundle assumes local user cannot lock remote repo (new ssh
1276 # unbundle assumes local user cannot lock remote repo (new ssh
1359 # servers, http servers).
1277 # servers, http servers).
1360
1278
1361 if remote.capable('unbundle'):
1279 if remote.capable('unbundle'):
1362 return self.push_unbundle(remote, force, revs)
1280 return self.push_unbundle(remote, force, revs)
1363 return self.push_addchangegroup(remote, force, revs)
1281 return self.push_addchangegroup(remote, force, revs)
1364
1282
1365 def prepush(self, remote, force, revs):
1283 def prepush(self, remote, force, revs):
1366 base = {}
1284 base = {}
1367 remote_heads = remote.heads()
1285 remote_heads = remote.heads()
1368 inc = self.findincoming(remote, base, remote_heads, force=force)
1286 inc = self.findincoming(remote, base, remote_heads, force=force)
1369
1287
1370 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1288 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1371 if revs is not None:
1289 if revs is not None:
1372 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1290 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1373 else:
1291 else:
1374 bases, heads = update, self.changelog.heads()
1292 bases, heads = update, self.changelog.heads()
1375
1293
1376 if not bases:
1294 if not bases:
1377 self.ui.status(_("no changes found\n"))
1295 self.ui.status(_("no changes found\n"))
1378 return None, 1
1296 return None, 1
1379 elif not force:
1297 elif not force:
1380 # check if we're creating new remote heads
1298 # check if we're creating new remote heads
1381 # to be a remote head after push, node must be either
1299 # to be a remote head after push, node must be either
1382 # - unknown locally
1300 # - unknown locally
1383 # - a local outgoing head descended from update
1301 # - a local outgoing head descended from update
1384 # - a remote head that's known locally and not
1302 # - a remote head that's known locally and not
1385 # ancestral to an outgoing head
1303 # ancestral to an outgoing head
1386
1304
1387 warn = 0
1305 warn = 0
1388
1306
1389 if remote_heads == [nullid]:
1307 if remote_heads == [nullid]:
1390 warn = 0
1308 warn = 0
1391 elif not revs and len(heads) > len(remote_heads):
1309 elif not revs and len(heads) > len(remote_heads):
1392 warn = 1
1310 warn = 1
1393 else:
1311 else:
1394 newheads = list(heads)
1312 newheads = list(heads)
1395 for r in remote_heads:
1313 for r in remote_heads:
1396 if r in self.changelog.nodemap:
1314 if r in self.changelog.nodemap:
1397 desc = self.changelog.heads(r, heads)
1315 desc = self.changelog.heads(r, heads)
1398 l = [h for h in heads if h in desc]
1316 l = [h for h in heads if h in desc]
1399 if not l:
1317 if not l:
1400 newheads.append(r)
1318 newheads.append(r)
1401 else:
1319 else:
1402 newheads.append(r)
1320 newheads.append(r)
1403 if len(newheads) > len(remote_heads):
1321 if len(newheads) > len(remote_heads):
1404 warn = 1
1322 warn = 1
1405
1323
1406 if warn:
1324 if warn:
1407 self.ui.warn(_("abort: push creates new remote branches!\n"))
1325 self.ui.warn(_("abort: push creates new remote branches!\n"))
1408 self.ui.status(_("(did you forget to merge?"
1326 self.ui.status(_("(did you forget to merge?"
1409 " use push -f to force)\n"))
1327 " use push -f to force)\n"))
1410 return None, 1
1328 return None, 1
1411 elif inc:
1329 elif inc:
1412 self.ui.warn(_("note: unsynced remote changes!\n"))
1330 self.ui.warn(_("note: unsynced remote changes!\n"))
1413
1331
1414
1332
1415 if revs is None:
1333 if revs is None:
1416 cg = self.changegroup(update, 'push')
1334 cg = self.changegroup(update, 'push')
1417 else:
1335 else:
1418 cg = self.changegroupsubset(update, revs, 'push')
1336 cg = self.changegroupsubset(update, revs, 'push')
1419 return cg, remote_heads
1337 return cg, remote_heads
1420
1338
1421 def push_addchangegroup(self, remote, force, revs):
1339 def push_addchangegroup(self, remote, force, revs):
1422 lock = remote.lock()
1340 lock = remote.lock()
1423
1341
1424 ret = self.prepush(remote, force, revs)
1342 ret = self.prepush(remote, force, revs)
1425 if ret[0] is not None:
1343 if ret[0] is not None:
1426 cg, remote_heads = ret
1344 cg, remote_heads = ret
1427 return remote.addchangegroup(cg, 'push', self.url())
1345 return remote.addchangegroup(cg, 'push', self.url())
1428 return ret[1]
1346 return ret[1]
1429
1347
1430 def push_unbundle(self, remote, force, revs):
1348 def push_unbundle(self, remote, force, revs):
1431 # local repo finds heads on server, finds out what revs it
1349 # local repo finds heads on server, finds out what revs it
1432 # must push. once revs transferred, if server finds it has
1350 # must push. once revs transferred, if server finds it has
1433 # different heads (someone else won commit/push race), server
1351 # different heads (someone else won commit/push race), server
1434 # aborts.
1352 # aborts.
1435
1353
1436 ret = self.prepush(remote, force, revs)
1354 ret = self.prepush(remote, force, revs)
1437 if ret[0] is not None:
1355 if ret[0] is not None:
1438 cg, remote_heads = ret
1356 cg, remote_heads = ret
1439 if force: remote_heads = ['force']
1357 if force: remote_heads = ['force']
1440 return remote.unbundle(cg, remote_heads, 'push')
1358 return remote.unbundle(cg, remote_heads, 'push')
1441 return ret[1]
1359 return ret[1]
1442
1360
1443 def changegroupinfo(self, nodes):
1361 def changegroupinfo(self, nodes):
1444 self.ui.note(_("%d changesets found\n") % len(nodes))
1362 self.ui.note(_("%d changesets found\n") % len(nodes))
1445 if self.ui.debugflag:
1363 if self.ui.debugflag:
1446 self.ui.debug(_("List of changesets:\n"))
1364 self.ui.debug(_("List of changesets:\n"))
1447 for node in nodes:
1365 for node in nodes:
1448 self.ui.debug("%s\n" % hex(node))
1366 self.ui.debug("%s\n" % hex(node))
1449
1367
1450 def changegroupsubset(self, bases, heads, source):
1368 def changegroupsubset(self, bases, heads, source):
1451 """This function generates a changegroup consisting of all the nodes
1369 """This function generates a changegroup consisting of all the nodes
1452 that are descendents of any of the bases, and ancestors of any of
1370 that are descendents of any of the bases, and ancestors of any of
1453 the heads.
1371 the heads.
1454
1372
1455 It is fairly complex as determining which filenodes and which
1373 It is fairly complex as determining which filenodes and which
1456 manifest nodes need to be included for the changeset to be complete
1374 manifest nodes need to be included for the changeset to be complete
1457 is non-trivial.
1375 is non-trivial.
1458
1376
1459 Another wrinkle is doing the reverse, figuring out which changeset in
1377 Another wrinkle is doing the reverse, figuring out which changeset in
1460 the changegroup a particular filenode or manifestnode belongs to."""
1378 the changegroup a particular filenode or manifestnode belongs to."""
1461
1379
1462 self.hook('preoutgoing', throw=True, source=source)
1380 self.hook('preoutgoing', throw=True, source=source)
1463
1381
1464 # Set up some initial variables
1382 # Set up some initial variables
1465 # Make it easy to refer to self.changelog
1383 # Make it easy to refer to self.changelog
1466 cl = self.changelog
1384 cl = self.changelog
1467 # msng is short for missing - compute the list of changesets in this
1385 # msng is short for missing - compute the list of changesets in this
1468 # changegroup.
1386 # changegroup.
1469 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1387 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1470 self.changegroupinfo(msng_cl_lst)
1388 self.changegroupinfo(msng_cl_lst)
1471 # Some bases may turn out to be superfluous, and some heads may be
1389 # Some bases may turn out to be superfluous, and some heads may be
1472 # too. nodesbetween will return the minimal set of bases and heads
1390 # too. nodesbetween will return the minimal set of bases and heads
1473 # necessary to re-create the changegroup.
1391 # necessary to re-create the changegroup.
1474
1392
1475 # Known heads are the list of heads that it is assumed the recipient
1393 # Known heads are the list of heads that it is assumed the recipient
1476 # of this changegroup will know about.
1394 # of this changegroup will know about.
1477 knownheads = {}
1395 knownheads = {}
1478 # We assume that all parents of bases are known heads.
1396 # We assume that all parents of bases are known heads.
1479 for n in bases:
1397 for n in bases:
1480 for p in cl.parents(n):
1398 for p in cl.parents(n):
1481 if p != nullid:
1399 if p != nullid:
1482 knownheads[p] = 1
1400 knownheads[p] = 1
1483 knownheads = knownheads.keys()
1401 knownheads = knownheads.keys()
1484 if knownheads:
1402 if knownheads:
1485 # Now that we know what heads are known, we can compute which
1403 # Now that we know what heads are known, we can compute which
1486 # changesets are known. The recipient must know about all
1404 # changesets are known. The recipient must know about all
1487 # changesets required to reach the known heads from the null
1405 # changesets required to reach the known heads from the null
1488 # changeset.
1406 # changeset.
1489 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1407 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1490 junk = None
1408 junk = None
1491 # Transform the list into an ersatz set.
1409 # Transform the list into an ersatz set.
1492 has_cl_set = dict.fromkeys(has_cl_set)
1410 has_cl_set = dict.fromkeys(has_cl_set)
1493 else:
1411 else:
1494 # If there were no known heads, the recipient cannot be assumed to
1412 # If there were no known heads, the recipient cannot be assumed to
1495 # know about any changesets.
1413 # know about any changesets.
1496 has_cl_set = {}
1414 has_cl_set = {}
1497
1415
1498 # Make it easy to refer to self.manifest
1416 # Make it easy to refer to self.manifest
1499 mnfst = self.manifest
1417 mnfst = self.manifest
1500 # We don't know which manifests are missing yet
1418 # We don't know which manifests are missing yet
1501 msng_mnfst_set = {}
1419 msng_mnfst_set = {}
1502 # Nor do we know which filenodes are missing.
1420 # Nor do we know which filenodes are missing.
1503 msng_filenode_set = {}
1421 msng_filenode_set = {}
1504
1422
1505 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1423 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1506 junk = None
1424 junk = None
1507
1425
1508 # A changeset always belongs to itself, so the changenode lookup
1426 # A changeset always belongs to itself, so the changenode lookup
1509 # function for a changenode is identity.
1427 # function for a changenode is identity.
1510 def identity(x):
1428 def identity(x):
1511 return x
1429 return x
1512
1430
1513 # A function generating function. Sets up an environment for the
1431 # A function generating function. Sets up an environment for the
1514 # inner function.
1432 # inner function.
1515 def cmp_by_rev_func(revlog):
1433 def cmp_by_rev_func(revlog):
1516 # Compare two nodes by their revision number in the environment's
1434 # Compare two nodes by their revision number in the environment's
1517 # revision history. Since the revision number both represents the
1435 # revision history. Since the revision number both represents the
1518 # most efficient order to read the nodes in, and represents a
1436 # most efficient order to read the nodes in, and represents a
1519 # topological sorting of the nodes, this function is often useful.
1437 # topological sorting of the nodes, this function is often useful.
1520 def cmp_by_rev(a, b):
1438 def cmp_by_rev(a, b):
1521 return cmp(revlog.rev(a), revlog.rev(b))
1439 return cmp(revlog.rev(a), revlog.rev(b))
1522 return cmp_by_rev
1440 return cmp_by_rev
1523
1441
1524 # If we determine that a particular file or manifest node must be a
1442 # If we determine that a particular file or manifest node must be a
1525 # node that the recipient of the changegroup will already have, we can
1443 # node that the recipient of the changegroup will already have, we can
1526 # also assume the recipient will have all the parents. This function
1444 # also assume the recipient will have all the parents. This function
1527 # prunes them from the set of missing nodes.
1445 # prunes them from the set of missing nodes.
1528 def prune_parents(revlog, hasset, msngset):
1446 def prune_parents(revlog, hasset, msngset):
1529 haslst = hasset.keys()
1447 haslst = hasset.keys()
1530 haslst.sort(cmp_by_rev_func(revlog))
1448 haslst.sort(cmp_by_rev_func(revlog))
1531 for node in haslst:
1449 for node in haslst:
1532 parentlst = [p for p in revlog.parents(node) if p != nullid]
1450 parentlst = [p for p in revlog.parents(node) if p != nullid]
1533 while parentlst:
1451 while parentlst:
1534 n = parentlst.pop()
1452 n = parentlst.pop()
1535 if n not in hasset:
1453 if n not in hasset:
1536 hasset[n] = 1
1454 hasset[n] = 1
1537 p = [p for p in revlog.parents(n) if p != nullid]
1455 p = [p for p in revlog.parents(n) if p != nullid]
1538 parentlst.extend(p)
1456 parentlst.extend(p)
1539 for n in hasset:
1457 for n in hasset:
1540 msngset.pop(n, None)
1458 msngset.pop(n, None)
1541
1459
1542 # This is a function generating function used to set up an environment
1460 # This is a function generating function used to set up an environment
1543 # for the inner function to execute in.
1461 # for the inner function to execute in.
1544 def manifest_and_file_collector(changedfileset):
1462 def manifest_and_file_collector(changedfileset):
1545 # This is an information gathering function that gathers
1463 # This is an information gathering function that gathers
1546 # information from each changeset node that goes out as part of
1464 # information from each changeset node that goes out as part of
1547 # the changegroup. The information gathered is a list of which
1465 # the changegroup. The information gathered is a list of which
1548 # manifest nodes are potentially required (the recipient may
1466 # manifest nodes are potentially required (the recipient may
1549 # already have them) and total list of all files which were
1467 # already have them) and total list of all files which were
1550 # changed in any changeset in the changegroup.
1468 # changed in any changeset in the changegroup.
1551 #
1469 #
1552 # We also remember the first changenode we saw any manifest
1470 # We also remember the first changenode we saw any manifest
1553 # referenced by so we can later determine which changenode 'owns'
1471 # referenced by so we can later determine which changenode 'owns'
1554 # the manifest.
1472 # the manifest.
1555 def collect_manifests_and_files(clnode):
1473 def collect_manifests_and_files(clnode):
1556 c = cl.read(clnode)
1474 c = cl.read(clnode)
1557 for f in c[3]:
1475 for f in c[3]:
1558 # This is to make sure we only have one instance of each
1476 # This is to make sure we only have one instance of each
1559 # filename string for each filename.
1477 # filename string for each filename.
1560 changedfileset.setdefault(f, f)
1478 changedfileset.setdefault(f, f)
1561 msng_mnfst_set.setdefault(c[0], clnode)
1479 msng_mnfst_set.setdefault(c[0], clnode)
1562 return collect_manifests_and_files
1480 return collect_manifests_and_files
1563
1481
# Figure out which manifest nodes (of the ones we think might be part
# of the changegroup) the recipient must know about and remove them
# from the changegroup.
def prune_manifests():
    '''Drop from msng_mnfst_set every manifest the recipient must
    already have.  Closes over cl, mnfst, has_cl_set, msng_mnfst_set
    and prune_parents from the enclosing changegroupsubset scope.
    '''
    has_mnfst_set = {}
    for n in msng_mnfst_set:
        # If a 'missing' manifest thinks it belongs to a changenode
        # the recipient is assumed to have, obviously the recipient
        # must have that manifest.
        linknode = cl.node(mnfst.linkrev(n))
        if linknode in has_cl_set:
            has_mnfst_set[n] = 1
    prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1577
1495
# Use the information collected in collect_manifests_and_files to say
# which changenode any manifestnode belongs to.
def lookup_manifest_link(mnfstnode):
    '''Return the changenode that "owns" *mnfstnode* (first changeset
    seen referencing it; recorded by collect_manifests_and_files).'''
    return msng_mnfst_set[mnfstnode]
1582
1500
# A function generating function that sets up the initial environment
# for the inner function.
def filenode_collector(changedfiles):
    '''Build a per-manifest callback for the manifest group generator.

    The returned function gathers, for each manifest node sent, which
    filenodes that manifest references (so they can be included in the
    changegroup too), and remembers which changenode each filenode
    belongs to — assuming a filenode belongs to the changenode of the
    first manifest that references it.  Closes over mnfst,
    msng_mnfst_set and msng_filenode_set.
    '''
    # next_rev is a one-element list so the closure can mutate it
    # (Python 2 has no "nonlocal").
    next_rev = [0]
    def collect_msng_filenodes(mnfstnode):
        r = mnfst.rev(mnfstnode)
        if r == next_rev[0]:
            # If the last rev we looked at was the one just previous,
            # we only need to see a diff.
            delta = mdiff.patchtext(mnfst.delta(mnfstnode))
            # For each line in the delta
            for dline in delta.splitlines():
                # get the filename and filenode for that line
                f, fnode = dline.split('\0')
                fnode = bin(fnode[:40])
                f = changedfiles.get(f, None)
                # And if the file is in the list of files we care
                # about.
                if f is not None:
                    # Get the changenode this manifest belongs to
                    clnode = msng_mnfst_set[mnfstnode]
                    # Create the set of filenodes for the file if
                    # there isn't one already.
                    ndset = msng_filenode_set.setdefault(f, {})
                    # And set the filenode's changelog node to the
                    # manifest's if it hasn't been set already.
                    ndset.setdefault(fnode, clnode)
        else:
            # Otherwise we need a full manifest.
            m = mnfst.read(mnfstnode)
            # For every file we care about.
            for f in changedfiles:
                fnode = m.get(f, None)
                # If it's in the manifest
                if fnode is not None:
                    # See comments above.
                    clnode = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(f, {})
                    ndset.setdefault(fnode, clnode)
        # Remember the revision we hope to see next.
        next_rev[0] = r + 1
    return collect_msng_filenodes
1632
1550
# We have a list of filenodes we think we need for a file, lets remove
# all those we know the recipient must have.
def prune_filenodes(f, filerevlog):
    '''Remove from msng_filenode_set[f] every filenode the recipient
    must already have.  Closes over cl, has_cl_set, msng_filenode_set
    and prune_parents.
    '''
    msngset = msng_filenode_set[f]
    hasset = {}
    # If a 'missing' filenode thinks it belongs to a changenode we
    # assume the recipient must have, then the recipient must have
    # that filenode.
    for n in msngset:
        clnode = cl.node(filerevlog.linkrev(n))
        if clnode in has_cl_set:
            hasset[n] = 1
    prune_parents(filerevlog, hasset, msngset)
1646
1564
# A function generator function that sets up the context for the
# inner function.
def lookup_filenode_link_func(fname):
    '''Return a lookup function mapping a filenode of *fname* to the
    changenode it belongs to (as recorded in msng_filenode_set).'''
    msngset = msng_filenode_set[fname]
    # Lookup the changenode the filenode belongs to.
    def lookup_filenode_link(fnode):
        return msngset[fnode]
    return lookup_filenode_link
1655
1573
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
    '''Yield the raw changegroup chunks: changesets, then manifests,
    then per-file filelog groups, then the closing chunk.  Closes over
    cl, mnfst, self, msng_cl_lst, msng_mnfst_set, msng_filenode_set
    and the helper functions defined alongside it.
    '''
    # The set of changed files starts empty.
    changedfiles = {}
    # Create a changenode group generator that will call our functions
    # back to lookup the owning changenode and collect information.
    group = cl.group(msng_cl_lst, identity,
                     manifest_and_file_collector(changedfiles))
    for chnk in group:
        yield chnk

    # The list of manifests has been collected by the generator
    # calling our functions back.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Sort the manifestnodes by revision number.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # Create a generator for the manifestnodes that calls our lookup
    # and data collection functions back.
    group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                        filenode_collector(changedfiles))
    for chnk in group:
        yield chnk

    # These are no longer needed, dereference and toss the memory for
    # them.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # Go through all our files in order sorted by name.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.  ("in" instead of the deprecated has_key.)
        if fname in msng_filenode_set:
            prune_filenodes(fname, filerevlog)
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # If any filenodes are left, generate the group for them,
        # otherwise don't bother.
        if len(msng_filenode_lst) > 0:
            yield changegroup.genchunk(fname)
            # Sort the filenodes by their revision #
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Create a group generator and only pass in a changenode
            # lookup function as we need to collect no information
            # from filenodes.
            group = filerevlog.group(msng_filenode_lst,
                                     lookup_filenode_link_func(fname))
            for chnk in group:
                yield chnk
        if fname in msng_filenode_set:
            # Don't need this anymore, toss it to free memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield changegroup.closechunk()
1716
1634
1717 if msng_cl_lst:
1635 if msng_cl_lst:
1718 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1636 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1719
1637
1720 return util.chunkbuffer(gengroup())
1638 return util.chunkbuffer(gengroup())
1721
1639
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    basenodes: nodes the recipient already has (everything between them
    and our heads is sent).
    source: opaque tag passed to the preoutgoing/outgoing hooks.
    Returns a util.chunkbuffer wrapping the chunk generator.
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Set of changelog revision numbers being sent; used to filter
    # manifest/filelog revisions by their linkrev.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        # Changelog nodes are their own link nodes.
        return x

    def gennodelst(revlog):
        # Yield the nodes of *revlog* whose linked changeset is in
        # the outgoing revset, in revision order.
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    def changed_file_collector(changedfileset):
        # Per-changeset callback: record every file touched.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a revlog node to the changelog node it is linked to.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                # A filelog group is emitted only if it is non-empty.
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1788
1706
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    source: stream of changegroup chunks (falsy means nothing to do).
    srctype: origin tag ('push', 'pull', ...) passed to hooks.
    url: origin URL passed to hooks.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    cor = cl.count() - 1
    chunkiter = changegroup.chunkiter(source)
    if cl.addgroup(chunkiter, csmap, tr, 1) is None:
        raise util.Abort(_("received changelog group is empty"))
    cnr = cl.count() - 1
    changesets = cnr - cor

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    chunkiter = changegroup.chunkiter(source)
    # no need to check for empty manifest group here:
    # if the result of the merge of 1 and 2 is the same in 3 and 4,
    # no new manifest will be created and the manifest group will
    # be empty during the pull
    self.manifest.addgroup(chunkiter, revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while 1:
        f = changegroup.getchunk(source)
        if not f:
            break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()
        chunkiter = changegroup.chunkiter(source)
        if fl.addgroup(chunkiter, revmap, tr) is None:
            raise util.Abort(_("received file revlog group is empty"))
        revisions += fl.count() - o
        files += 1

    # make changelog see real files again
    cl.finalize(tr)

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads != oldheads:
        heads = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    if changesets > 0:
        # pretxnchangegroup fires before the transaction is closed so
        # a failing hook can still roll everything back.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)), source=srctype,
                  url=url)

    tr.close()

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1885
1803
1886
1804
def stream_in(self, remote):
    '''Clone by copying raw store files streamed from *remote*.

    Protocol: one status-code line, then "<files> <bytes>", then for
    each file a "<name>\\0<size>" header followed by <size> raw bytes.
    Returns len(self.heads()) + 1 (never 0, mirroring pull's
    convention).  Raises util.Abort / util.UnexpectedOutput on
    protocol errors.
    '''
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUGFIX: "except ValueError, TypeError:" caught only ValueError
    # and bound it to the name TypeError; a tuple catches both.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # BUGFIX: same mis-written except clause as above.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # Guard against a zero/negative clock delta dividing by zero.
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
1933
1851
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible

    NOTE: the mutable default for *heads* is safe here because the
    list is only read, never mutated.
    '''

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1952
1870
# used to avoid circular references so destructors work
def aftertrans(files):
    '''Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into tuples up front so the callback does not
    keep a reference to the caller's (possibly mutable) structures —
    avoiding circular references so destructors work.
    '''
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
1960
1878
def instance(ui, path, create):
    '''Repo-type entry point: open (or create) a local repository at
    *path*, stripping any leading "file:" scheme.'''
    return localrepository(ui, util.drop_scheme('file', path), create)
1963
1881
def islocal(path):
    '''Repo-type entry point: local repositories are always local.'''
    return True
General Comments 0
You need to be logged in to leave comments. Login now