##// END OF EJS Templates
Warn about large files on hg add
Matt Mackall -
r4475:e8a58406 default
parent child Browse files
Show More
@@ -1,1944 +1,1950
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
class localrepository(repo.repository):
    # protocol capabilities this repository class advertises to clients
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk format requirements this version of the code understands
    supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.root = os.path.realpath(path)
33 self.root = os.path.realpath(path)
34 self.path = os.path.join(self.root, ".hg")
34 self.path = os.path.join(self.root, ".hg")
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 requirements = ["revlogv1"]
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 if parentui.configbool('format', 'usestore', True):
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements.append("store")
47 requirements.append("store")
48 # create an invalid changelog
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
51 ' dummy changelog to prevent using the old repo layout'
52 )
52 )
53 reqfile = self.opener("requires", "w")
53 reqfile = self.opener("requires", "w")
54 for r in requirements:
54 for r in requirements:
55 reqfile.write("%s\n" % r)
55 reqfile.write("%s\n" % r)
56 reqfile.close()
56 reqfile.close()
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94
94
95 fallback = self.ui.config('ui', 'fallbackencoding')
95 fallback = self.ui.config('ui', 'fallbackencoding')
96 if fallback:
96 if fallback:
97 util._fallbackencoding = fallback
97 util._fallbackencoding = fallback
98
98
99 self.tagscache = None
99 self.tagscache = None
100 self.branchcache = None
100 self.branchcache = None
101 self.nodetagscache = None
101 self.nodetagscache = None
102 self.filterpats = {}
102 self.filterpats = {}
103 self.transhandle = None
103 self.transhandle = None
104
104
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.append([n for n in bh if n not in ah])
306 ah.append([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
371 def _branchtags(self):
371 def _branchtags(self):
372 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
373
373
374 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
375 if lrev != tiprev:
375 if lrev != tiprev:
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378
378
379 return partial
379 return partial
380
380
381 def branchtags(self):
381 def branchtags(self):
382 if self.branchcache is not None:
382 if self.branchcache is not None:
383 return self.branchcache
383 return self.branchcache
384
384
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 partial = self._branchtags()
386 partial = self._branchtags()
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 return self.branchcache
392 return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
428 f.rename()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 pass
430 pass
431
431
432 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
433 for r in xrange(start, end):
434 c = self.changectx(r)
434 c = self.changectx(r)
435 b = c.branch()
435 b = c.branch()
436 partial[b] = c.node()
436 partial[b] = c.node()
437
437
438 def lookup(self, key):
438 def lookup(self, key):
439 if key == '.':
439 if key == '.':
440 key = self.dirstate.parents()[0]
440 key = self.dirstate.parents()[0]
441 if key == nullid:
441 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
443 elif key == 'null':
443 elif key == 'null':
444 return nullid
444 return nullid
445 n = self.changelog._match(key)
445 n = self.changelog._match(key)
446 if n:
446 if n:
447 return n
447 return n
448 if key in self.tags():
448 if key in self.tags():
449 return self.tags()[key]
449 return self.tags()[key]
450 if key in self.branchtags():
450 if key in self.branchtags():
451 return self.branchtags()[key]
451 return self.branchtags()[key]
452 n = self.changelog._partialmatch(key)
452 n = self.changelog._partialmatch(key)
453 if n:
453 if n:
454 return n
454 return n
455 raise repo.RepoError(_("unknown revision '%s'") % key)
455 raise repo.RepoError(_("unknown revision '%s'") % key)
456
456
457 def dev(self):
457 def dev(self):
458 return os.lstat(self.path).st_dev
458 return os.lstat(self.path).st_dev
459
459
460 def local(self):
460 def local(self):
461 return True
461 return True
462
462
463 def join(self, f):
463 def join(self, f):
464 return os.path.join(self.path, f)
464 return os.path.join(self.path, f)
465
465
466 def sjoin(self, f):
466 def sjoin(self, f):
467 f = self.encodefn(f)
467 f = self.encodefn(f)
468 return os.path.join(self.spath, f)
468 return os.path.join(self.spath, f)
469
469
470 def wjoin(self, f):
470 def wjoin(self, f):
471 return os.path.join(self.root, f)
471 return os.path.join(self.root, f)
472
472
473 def file(self, f):
473 def file(self, f):
474 if f[0] == '/':
474 if f[0] == '/':
475 f = f[1:]
475 f = f[1:]
476 return filelog.filelog(self.sopener, f)
476 return filelog.filelog(self.sopener, f)
477
477
478 def changectx(self, changeid=None):
478 def changectx(self, changeid=None):
479 return context.changectx(self, changeid)
479 return context.changectx(self, changeid)
480
480
481 def workingctx(self):
481 def workingctx(self):
482 return context.workingctx(self)
482 return context.workingctx(self)
483
483
484 def parents(self, changeid=None):
484 def parents(self, changeid=None):
485 '''
485 '''
486 get list of changectxs for parents of changeid or working directory
486 get list of changectxs for parents of changeid or working directory
487 '''
487 '''
488 if changeid is None:
488 if changeid is None:
489 pl = self.dirstate.parents()
489 pl = self.dirstate.parents()
490 else:
490 else:
491 n = self.changelog.lookup(changeid)
491 n = self.changelog.lookup(changeid)
492 pl = self.changelog.parents(n)
492 pl = self.changelog.parents(n)
493 if pl[1] == nullid:
493 if pl[1] == nullid:
494 return [self.changectx(pl[0])]
494 return [self.changectx(pl[0])]
495 return [self.changectx(pl[0]), self.changectx(pl[1])]
495 return [self.changectx(pl[0]), self.changectx(pl[1])]
496
496
497 def filectx(self, path, changeid=None, fileid=None):
497 def filectx(self, path, changeid=None, fileid=None):
498 """changeid can be a changeset revision, node, or tag.
498 """changeid can be a changeset revision, node, or tag.
499 fileid can be a file revision or node."""
499 fileid can be a file revision or node."""
500 return context.filectx(self, path, changeid, fileid)
500 return context.filectx(self, path, changeid, fileid)
501
501
502 def getcwd(self):
502 def getcwd(self):
503 return self.dirstate.getcwd()
503 return self.dirstate.getcwd()
504
504
505 def wfile(self, f, mode='r'):
505 def wfile(self, f, mode='r'):
506 return self.wopener(f, mode)
506 return self.wopener(f, mode)
507
507
508 def _link(self, f):
508 def _link(self, f):
509 return os.path.islink(self.wjoin(f))
509 return os.path.islink(self.wjoin(f))
510
510
511 def _filter(self, filter, filename, data):
511 def _filter(self, filter, filename, data):
512 if filter not in self.filterpats:
512 if filter not in self.filterpats:
513 l = []
513 l = []
514 for pat, cmd in self.ui.configitems(filter):
514 for pat, cmd in self.ui.configitems(filter):
515 mf = util.matcher(self.root, "", [pat], [], [])[1]
515 mf = util.matcher(self.root, "", [pat], [], [])[1]
516 l.append((mf, cmd))
516 l.append((mf, cmd))
517 self.filterpats[filter] = l
517 self.filterpats[filter] = l
518
518
519 for mf, cmd in self.filterpats[filter]:
519 for mf, cmd in self.filterpats[filter]:
520 if mf(filename):
520 if mf(filename):
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 data = util.filter(data, cmd)
522 data = util.filter(data, cmd)
523 break
523 break
524
524
525 return data
525 return data
526
526
527 def wread(self, filename):
527 def wread(self, filename):
528 if self._link(filename):
528 if self._link(filename):
529 data = os.readlink(self.wjoin(filename))
529 data = os.readlink(self.wjoin(filename))
530 else:
530 else:
531 data = self.wopener(filename, 'r').read()
531 data = self.wopener(filename, 'r').read()
532 return self._filter("encode", filename, data)
532 return self._filter("encode", filename, data)
533
533
534 def wwrite(self, filename, data, flags):
534 def wwrite(self, filename, data, flags):
535 data = self._filter("decode", filename, data)
535 data = self._filter("decode", filename, data)
536 if "l" in flags:
536 if "l" in flags:
537 f = self.wjoin(filename)
537 f = self.wjoin(filename)
538 try:
538 try:
539 os.unlink(f)
539 os.unlink(f)
540 except OSError:
540 except OSError:
541 pass
541 pass
542 d = os.path.dirname(f)
542 d = os.path.dirname(f)
543 if not os.path.exists(d):
543 if not os.path.exists(d):
544 os.makedirs(d)
544 os.makedirs(d)
545 os.symlink(data, f)
545 os.symlink(data, f)
546 else:
546 else:
547 try:
547 try:
548 if self._link(filename):
548 if self._link(filename):
549 os.unlink(self.wjoin(filename))
549 os.unlink(self.wjoin(filename))
550 except OSError:
550 except OSError:
551 pass
551 pass
552 self.wopener(filename, 'w').write(data)
552 self.wopener(filename, 'w').write(data)
553 util.set_exec(self.wjoin(filename), "x" in flags)
553 util.set_exec(self.wjoin(filename), "x" in flags)
554
554
555 def wwritedata(self, filename, data):
555 def wwritedata(self, filename, data):
556 return self._filter("decode", filename, data)
556 return self._filter("decode", filename, data)
557
557
558 def transaction(self):
558 def transaction(self):
559 tr = self.transhandle
559 tr = self.transhandle
560 if tr != None and tr.running():
560 if tr != None and tr.running():
561 return tr.nest()
561 return tr.nest()
562
562
563 # save dirstate for rollback
563 # save dirstate for rollback
564 try:
564 try:
565 ds = self.opener("dirstate").read()
565 ds = self.opener("dirstate").read()
566 except IOError:
566 except IOError:
567 ds = ""
567 ds = ""
568 self.opener("journal.dirstate", "w").write(ds)
568 self.opener("journal.dirstate", "w").write(ds)
569
569
570 renames = [(self.sjoin("journal"), self.sjoin("undo")),
570 renames = [(self.sjoin("journal"), self.sjoin("undo")),
571 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
571 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
572 tr = transaction.transaction(self.ui.warn, self.sopener,
572 tr = transaction.transaction(self.ui.warn, self.sopener,
573 self.sjoin("journal"),
573 self.sjoin("journal"),
574 aftertrans(renames))
574 aftertrans(renames))
575 self.transhandle = tr
575 self.transhandle = tr
576 return tr
576 return tr
577
577
578 def recover(self):
578 def recover(self):
579 l = self.lock()
579 l = self.lock()
580 if os.path.exists(self.sjoin("journal")):
580 if os.path.exists(self.sjoin("journal")):
581 self.ui.status(_("rolling back interrupted transaction\n"))
581 self.ui.status(_("rolling back interrupted transaction\n"))
582 transaction.rollback(self.sopener, self.sjoin("journal"))
582 transaction.rollback(self.sopener, self.sjoin("journal"))
583 self.reload()
583 self.reload()
584 return True
584 return True
585 else:
585 else:
586 self.ui.warn(_("no interrupted transaction available\n"))
586 self.ui.warn(_("no interrupted transaction available\n"))
587 return False
587 return False
588
588
589 def rollback(self, wlock=None, lock=None):
589 def rollback(self, wlock=None, lock=None):
590 if not wlock:
590 if not wlock:
591 wlock = self.wlock()
591 wlock = self.wlock()
592 if not lock:
592 if not lock:
593 lock = self.lock()
593 lock = self.lock()
594 if os.path.exists(self.sjoin("undo")):
594 if os.path.exists(self.sjoin("undo")):
595 self.ui.status(_("rolling back last transaction\n"))
595 self.ui.status(_("rolling back last transaction\n"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 self.reload()
598 self.reload()
599 self.wreload()
599 self.wreload()
600 else:
600 else:
601 self.ui.warn(_("no rollback information available\n"))
601 self.ui.warn(_("no rollback information available\n"))
602
602
603 def wreload(self):
603 def wreload(self):
604 self.dirstate.reload()
604 self.dirstate.reload()
605
605
606 def reload(self):
606 def reload(self):
607 self.changelog.load()
607 self.changelog.load()
608 self.manifest.load()
608 self.manifest.load()
609 self.tagscache = None
609 self.tagscache = None
610 self.nodetagscache = None
610 self.nodetagscache = None
611
611
612 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
612 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
613 desc=None):
613 desc=None):
614 try:
614 try:
615 l = lock.lock(lockname, 0, releasefn, desc=desc)
615 l = lock.lock(lockname, 0, releasefn, desc=desc)
616 except lock.LockHeld, inst:
616 except lock.LockHeld, inst:
617 if not wait:
617 if not wait:
618 raise
618 raise
619 self.ui.warn(_("waiting for lock on %s held by %r\n") %
619 self.ui.warn(_("waiting for lock on %s held by %r\n") %
620 (desc, inst.locker))
620 (desc, inst.locker))
621 # default to 600 seconds timeout
621 # default to 600 seconds timeout
622 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
622 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
623 releasefn, desc=desc)
623 releasefn, desc=desc)
624 if acquirefn:
624 if acquirefn:
625 acquirefn()
625 acquirefn()
626 return l
626 return l
627
627
628 def lock(self, wait=1):
628 def lock(self, wait=1):
629 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
629 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
630 desc=_('repository %s') % self.origroot)
630 desc=_('repository %s') % self.origroot)
631
631
632 def wlock(self, wait=1):
632 def wlock(self, wait=1):
633 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
633 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
634 self.wreload,
634 self.wreload,
635 desc=_('working directory of %s') % self.origroot)
635 desc=_('working directory of %s') % self.origroot)
636
636
637 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
637 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
638 """
638 """
639 commit an individual file as part of a larger transaction
639 commit an individual file as part of a larger transaction
640 """
640 """
641
641
642 t = self.wread(fn)
642 t = self.wread(fn)
643 fl = self.file(fn)
643 fl = self.file(fn)
644 fp1 = manifest1.get(fn, nullid)
644 fp1 = manifest1.get(fn, nullid)
645 fp2 = manifest2.get(fn, nullid)
645 fp2 = manifest2.get(fn, nullid)
646
646
647 meta = {}
647 meta = {}
648 cp = self.dirstate.copied(fn)
648 cp = self.dirstate.copied(fn)
649 if cp:
649 if cp:
650 # Mark the new revision of this file as a copy of another
650 # Mark the new revision of this file as a copy of another
651 # file. This copy data will effectively act as a parent
651 # file. This copy data will effectively act as a parent
652 # of this new revision. If this is a merge, the first
652 # of this new revision. If this is a merge, the first
653 # parent will be the nullid (meaning "look up the copy data")
653 # parent will be the nullid (meaning "look up the copy data")
654 # and the second one will be the other parent. For example:
654 # and the second one will be the other parent. For example:
655 #
655 #
656 # 0 --- 1 --- 3 rev1 changes file foo
656 # 0 --- 1 --- 3 rev1 changes file foo
657 # \ / rev2 renames foo to bar and changes it
657 # \ / rev2 renames foo to bar and changes it
658 # \- 2 -/ rev3 should have bar with all changes and
658 # \- 2 -/ rev3 should have bar with all changes and
659 # should record that bar descends from
659 # should record that bar descends from
660 # bar in rev2 and foo in rev1
660 # bar in rev2 and foo in rev1
661 #
661 #
662 # this allows this merge to succeed:
662 # this allows this merge to succeed:
663 #
663 #
664 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
664 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
665 # \ / merging rev3 and rev4 should use bar@rev2
665 # \ / merging rev3 and rev4 should use bar@rev2
666 # \- 2 --- 4 as the merge base
666 # \- 2 --- 4 as the merge base
667 #
667 #
668 meta["copy"] = cp
668 meta["copy"] = cp
669 if not manifest2: # not a branch merge
669 if not manifest2: # not a branch merge
670 meta["copyrev"] = hex(manifest1.get(cp, nullid))
670 meta["copyrev"] = hex(manifest1.get(cp, nullid))
671 fp2 = nullid
671 fp2 = nullid
672 elif fp2 != nullid: # copied on remote side
672 elif fp2 != nullid: # copied on remote side
673 meta["copyrev"] = hex(manifest1.get(cp, nullid))
673 meta["copyrev"] = hex(manifest1.get(cp, nullid))
674 elif fp1 != nullid: # copied on local side, reversed
674 elif fp1 != nullid: # copied on local side, reversed
675 meta["copyrev"] = hex(manifest2.get(cp))
675 meta["copyrev"] = hex(manifest2.get(cp))
676 fp2 = fp1
676 fp2 = fp1
677 else: # directory rename
677 else: # directory rename
678 meta["copyrev"] = hex(manifest1.get(cp, nullid))
678 meta["copyrev"] = hex(manifest1.get(cp, nullid))
679 self.ui.debug(_(" %s: copy %s:%s\n") %
679 self.ui.debug(_(" %s: copy %s:%s\n") %
680 (fn, cp, meta["copyrev"]))
680 (fn, cp, meta["copyrev"]))
681 fp1 = nullid
681 fp1 = nullid
682 elif fp2 != nullid:
682 elif fp2 != nullid:
683 # is one parent an ancestor of the other?
683 # is one parent an ancestor of the other?
684 fpa = fl.ancestor(fp1, fp2)
684 fpa = fl.ancestor(fp1, fp2)
685 if fpa == fp1:
685 if fpa == fp1:
686 fp1, fp2 = fp2, nullid
686 fp1, fp2 = fp2, nullid
687 elif fpa == fp2:
687 elif fpa == fp2:
688 fp2 = nullid
688 fp2 = nullid
689
689
690 # is the file unmodified from the parent? report existing entry
690 # is the file unmodified from the parent? report existing entry
691 if fp2 == nullid and not fl.cmp(fp1, t):
691 if fp2 == nullid and not fl.cmp(fp1, t):
692 return fp1
692 return fp1
693
693
694 changelist.append(fn)
694 changelist.append(fn)
695 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
695 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
696
696
697 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
697 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
698 if p1 is None:
698 if p1 is None:
699 p1, p2 = self.dirstate.parents()
699 p1, p2 = self.dirstate.parents()
700 return self.commit(files=files, text=text, user=user, date=date,
700 return self.commit(files=files, text=text, user=user, date=date,
701 p1=p1, p2=p2, wlock=wlock, extra=extra)
701 p1=p1, p2=p2, wlock=wlock, extra=extra)
702
702
703 def commit(self, files=None, text="", user=None, date=None,
703 def commit(self, files=None, text="", user=None, date=None,
704 match=util.always, force=False, lock=None, wlock=None,
704 match=util.always, force=False, lock=None, wlock=None,
705 force_editor=False, p1=None, p2=None, extra={}):
705 force_editor=False, p1=None, p2=None, extra={}):
706
706
707 commit = []
707 commit = []
708 remove = []
708 remove = []
709 changed = []
709 changed = []
710 use_dirstate = (p1 is None) # not rawcommit
710 use_dirstate = (p1 is None) # not rawcommit
711 extra = extra.copy()
711 extra = extra.copy()
712
712
713 if use_dirstate:
713 if use_dirstate:
714 if files:
714 if files:
715 for f in files:
715 for f in files:
716 s = self.dirstate.state(f)
716 s = self.dirstate.state(f)
717 if s in 'nmai':
717 if s in 'nmai':
718 commit.append(f)
718 commit.append(f)
719 elif s == 'r':
719 elif s == 'r':
720 remove.append(f)
720 remove.append(f)
721 else:
721 else:
722 self.ui.warn(_("%s not tracked!\n") % f)
722 self.ui.warn(_("%s not tracked!\n") % f)
723 else:
723 else:
724 changes = self.status(match=match)[:5]
724 changes = self.status(match=match)[:5]
725 modified, added, removed, deleted, unknown = changes
725 modified, added, removed, deleted, unknown = changes
726 commit = modified + added
726 commit = modified + added
727 remove = removed
727 remove = removed
728 else:
728 else:
729 commit = files
729 commit = files
730
730
731 if use_dirstate:
731 if use_dirstate:
732 p1, p2 = self.dirstate.parents()
732 p1, p2 = self.dirstate.parents()
733 update_dirstate = True
733 update_dirstate = True
734 else:
734 else:
735 p1, p2 = p1, p2 or nullid
735 p1, p2 = p1, p2 or nullid
736 update_dirstate = (self.dirstate.parents()[0] == p1)
736 update_dirstate = (self.dirstate.parents()[0] == p1)
737
737
738 c1 = self.changelog.read(p1)
738 c1 = self.changelog.read(p1)
739 c2 = self.changelog.read(p2)
739 c2 = self.changelog.read(p2)
740 m1 = self.manifest.read(c1[0]).copy()
740 m1 = self.manifest.read(c1[0]).copy()
741 m2 = self.manifest.read(c2[0])
741 m2 = self.manifest.read(c2[0])
742
742
743 if use_dirstate:
743 if use_dirstate:
744 branchname = self.workingctx().branch()
744 branchname = self.workingctx().branch()
745 try:
745 try:
746 branchname = branchname.decode('UTF-8').encode('UTF-8')
746 branchname = branchname.decode('UTF-8').encode('UTF-8')
747 except UnicodeDecodeError:
747 except UnicodeDecodeError:
748 raise util.Abort(_('branch name not in UTF-8!'))
748 raise util.Abort(_('branch name not in UTF-8!'))
749 else:
749 else:
750 branchname = ""
750 branchname = ""
751
751
752 if use_dirstate:
752 if use_dirstate:
753 oldname = c1[5].get("branch") # stored in UTF-8
753 oldname = c1[5].get("branch") # stored in UTF-8
754 if not commit and not remove and not force and p2 == nullid and \
754 if not commit and not remove and not force and p2 == nullid and \
755 branchname == oldname:
755 branchname == oldname:
756 self.ui.status(_("nothing changed\n"))
756 self.ui.status(_("nothing changed\n"))
757 return None
757 return None
758
758
759 xp1 = hex(p1)
759 xp1 = hex(p1)
760 if p2 == nullid: xp2 = ''
760 if p2 == nullid: xp2 = ''
761 else: xp2 = hex(p2)
761 else: xp2 = hex(p2)
762
762
763 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
763 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
764
764
765 if not wlock:
765 if not wlock:
766 wlock = self.wlock()
766 wlock = self.wlock()
767 if not lock:
767 if not lock:
768 lock = self.lock()
768 lock = self.lock()
769 tr = self.transaction()
769 tr = self.transaction()
770
770
771 # check in files
771 # check in files
772 new = {}
772 new = {}
773 linkrev = self.changelog.count()
773 linkrev = self.changelog.count()
774 commit.sort()
774 commit.sort()
775 is_exec = util.execfunc(self.root, m1.execf)
775 is_exec = util.execfunc(self.root, m1.execf)
776 is_link = util.linkfunc(self.root, m1.linkf)
776 is_link = util.linkfunc(self.root, m1.linkf)
777 for f in commit:
777 for f in commit:
778 self.ui.note(f + "\n")
778 self.ui.note(f + "\n")
779 try:
779 try:
780 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
780 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
781 m1.set(f, is_exec(f), is_link(f))
781 m1.set(f, is_exec(f), is_link(f))
782 except (OSError, IOError):
782 except (OSError, IOError):
783 if use_dirstate:
783 if use_dirstate:
784 self.ui.warn(_("trouble committing %s!\n") % f)
784 self.ui.warn(_("trouble committing %s!\n") % f)
785 raise
785 raise
786 else:
786 else:
787 remove.append(f)
787 remove.append(f)
788
788
789 # update manifest
789 # update manifest
790 m1.update(new)
790 m1.update(new)
791 remove.sort()
791 remove.sort()
792 removed = []
792 removed = []
793
793
794 for f in remove:
794 for f in remove:
795 if f in m1:
795 if f in m1:
796 del m1[f]
796 del m1[f]
797 removed.append(f)
797 removed.append(f)
798 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
798 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
799
799
800 # add changeset
800 # add changeset
801 new = new.keys()
801 new = new.keys()
802 new.sort()
802 new.sort()
803
803
804 user = user or self.ui.username()
804 user = user or self.ui.username()
805 if not text or force_editor:
805 if not text or force_editor:
806 edittext = []
806 edittext = []
807 if text:
807 if text:
808 edittext.append(text)
808 edittext.append(text)
809 edittext.append("")
809 edittext.append("")
810 edittext.append("HG: user: %s" % user)
810 edittext.append("HG: user: %s" % user)
811 if p2 != nullid:
811 if p2 != nullid:
812 edittext.append("HG: branch merge")
812 edittext.append("HG: branch merge")
813 if branchname:
813 if branchname:
814 edittext.append("HG: branch %s" % util.tolocal(branchname))
814 edittext.append("HG: branch %s" % util.tolocal(branchname))
815 edittext.extend(["HG: changed %s" % f for f in changed])
815 edittext.extend(["HG: changed %s" % f for f in changed])
816 edittext.extend(["HG: removed %s" % f for f in removed])
816 edittext.extend(["HG: removed %s" % f for f in removed])
817 if not changed and not remove:
817 if not changed and not remove:
818 edittext.append("HG: no files changed")
818 edittext.append("HG: no files changed")
819 edittext.append("")
819 edittext.append("")
820 # run editor in the repository root
820 # run editor in the repository root
821 olddir = os.getcwd()
821 olddir = os.getcwd()
822 os.chdir(self.root)
822 os.chdir(self.root)
823 text = self.ui.edit("\n".join(edittext), user)
823 text = self.ui.edit("\n".join(edittext), user)
824 os.chdir(olddir)
824 os.chdir(olddir)
825
825
826 lines = [line.rstrip() for line in text.rstrip().splitlines()]
826 lines = [line.rstrip() for line in text.rstrip().splitlines()]
827 while lines and not lines[0]:
827 while lines and not lines[0]:
828 del lines[0]
828 del lines[0]
829 if not lines:
829 if not lines:
830 return None
830 return None
831 text = '\n'.join(lines)
831 text = '\n'.join(lines)
832 if branchname:
832 if branchname:
833 extra["branch"] = branchname
833 extra["branch"] = branchname
834 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
834 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
835 user, date, extra)
835 user, date, extra)
836 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
836 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
837 parent2=xp2)
837 parent2=xp2)
838 tr.close()
838 tr.close()
839
839
840 if self.branchcache and "branch" in extra:
840 if self.branchcache and "branch" in extra:
841 self.branchcache[util.tolocal(extra["branch"])] = n
841 self.branchcache[util.tolocal(extra["branch"])] = n
842
842
843 if use_dirstate or update_dirstate:
843 if use_dirstate or update_dirstate:
844 self.dirstate.setparents(n)
844 self.dirstate.setparents(n)
845 if use_dirstate:
845 if use_dirstate:
846 self.dirstate.update(new, "n")
846 self.dirstate.update(new, "n")
847 self.dirstate.forget(removed)
847 self.dirstate.forget(removed)
848
848
849 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
849 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
850 return n
850 return n
851
851
852 def walk(self, node=None, files=[], match=util.always, badmatch=None):
852 def walk(self, node=None, files=[], match=util.always, badmatch=None):
853 '''
853 '''
854 walk recursively through the directory tree or a given
854 walk recursively through the directory tree or a given
855 changeset, finding all files matched by the match
855 changeset, finding all files matched by the match
856 function
856 function
857
857
858 results are yielded in a tuple (src, filename), where src
858 results are yielded in a tuple (src, filename), where src
859 is one of:
859 is one of:
860 'f' the file was found in the directory tree
860 'f' the file was found in the directory tree
861 'm' the file was only in the dirstate and not in the tree
861 'm' the file was only in the dirstate and not in the tree
862 'b' file was not found and matched badmatch
862 'b' file was not found and matched badmatch
863 '''
863 '''
864
864
865 if node:
865 if node:
866 fdict = dict.fromkeys(files)
866 fdict = dict.fromkeys(files)
867 # for dirstate.walk, files=['.'] means "walk the whole tree".
867 # for dirstate.walk, files=['.'] means "walk the whole tree".
868 # follow that here, too
868 # follow that here, too
869 fdict.pop('.', None)
869 fdict.pop('.', None)
870 mdict = self.manifest.read(self.changelog.read(node)[0])
870 mdict = self.manifest.read(self.changelog.read(node)[0])
871 mfiles = mdict.keys()
871 mfiles = mdict.keys()
872 mfiles.sort()
872 mfiles.sort()
873 for fn in mfiles:
873 for fn in mfiles:
874 for ffn in fdict:
874 for ffn in fdict:
875 # match if the file is the exact name or a directory
875 # match if the file is the exact name or a directory
876 if ffn == fn or fn.startswith("%s/" % ffn):
876 if ffn == fn or fn.startswith("%s/" % ffn):
877 del fdict[ffn]
877 del fdict[ffn]
878 break
878 break
879 if match(fn):
879 if match(fn):
880 yield 'm', fn
880 yield 'm', fn
881 ffiles = fdict.keys()
881 ffiles = fdict.keys()
882 ffiles.sort()
882 ffiles.sort()
883 for fn in ffiles:
883 for fn in ffiles:
884 if badmatch and badmatch(fn):
884 if badmatch and badmatch(fn):
885 if match(fn):
885 if match(fn):
886 yield 'b', fn
886 yield 'b', fn
887 else:
887 else:
888 self.ui.warn(_('%s: No such file in rev %s\n') % (
888 self.ui.warn(_('%s: No such file in rev %s\n') % (
889 util.pathto(self.root, self.getcwd(), fn), short(node)))
889 util.pathto(self.root, self.getcwd(), fn), short(node)))
890 else:
890 else:
891 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
891 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
892 yield src, fn
892 yield src, fn
893
893
894 def status(self, node1=None, node2=None, files=[], match=util.always,
894 def status(self, node1=None, node2=None, files=[], match=util.always,
895 wlock=None, list_ignored=False, list_clean=False):
895 wlock=None, list_ignored=False, list_clean=False):
896 """return status of files between two nodes or node and working directory
896 """return status of files between two nodes or node and working directory
897
897
898 If node1 is None, use the first dirstate parent instead.
898 If node1 is None, use the first dirstate parent instead.
899 If node2 is None, compare node1 with working directory.
899 If node2 is None, compare node1 with working directory.
900 """
900 """
901
901
902 def fcmp(fn, getnode):
902 def fcmp(fn, getnode):
903 t1 = self.wread(fn)
903 t1 = self.wread(fn)
904 return self.file(fn).cmp(getnode(fn), t1)
904 return self.file(fn).cmp(getnode(fn), t1)
905
905
906 def mfmatches(node):
906 def mfmatches(node):
907 change = self.changelog.read(node)
907 change = self.changelog.read(node)
908 mf = self.manifest.read(change[0]).copy()
908 mf = self.manifest.read(change[0]).copy()
909 for fn in mf.keys():
909 for fn in mf.keys():
910 if not match(fn):
910 if not match(fn):
911 del mf[fn]
911 del mf[fn]
912 return mf
912 return mf
913
913
914 modified, added, removed, deleted, unknown = [], [], [], [], []
914 modified, added, removed, deleted, unknown = [], [], [], [], []
915 ignored, clean = [], []
915 ignored, clean = [], []
916
916
917 compareworking = False
917 compareworking = False
918 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
918 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
919 compareworking = True
919 compareworking = True
920
920
921 if not compareworking:
921 if not compareworking:
922 # read the manifest from node1 before the manifest from node2,
922 # read the manifest from node1 before the manifest from node2,
923 # so that we'll hit the manifest cache if we're going through
923 # so that we'll hit the manifest cache if we're going through
924 # all the revisions in parent->child order.
924 # all the revisions in parent->child order.
925 mf1 = mfmatches(node1)
925 mf1 = mfmatches(node1)
926
926
927 mywlock = False
927 mywlock = False
928
928
929 # are we comparing the working directory?
929 # are we comparing the working directory?
930 if not node2:
930 if not node2:
931 (lookup, modified, added, removed, deleted, unknown,
931 (lookup, modified, added, removed, deleted, unknown,
932 ignored, clean) = self.dirstate.status(files, match,
932 ignored, clean) = self.dirstate.status(files, match,
933 list_ignored, list_clean)
933 list_ignored, list_clean)
934
934
935 # are we comparing working dir against its parent?
935 # are we comparing working dir against its parent?
936 if compareworking:
936 if compareworking:
937 if lookup:
937 if lookup:
938 # do a full compare of any files that might have changed
938 # do a full compare of any files that might have changed
939 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
939 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
940 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
940 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
941 nullid)
941 nullid)
942 for f in lookup:
942 for f in lookup:
943 if fcmp(f, getnode):
943 if fcmp(f, getnode):
944 modified.append(f)
944 modified.append(f)
945 else:
945 else:
946 clean.append(f)
946 clean.append(f)
947 if not wlock and not mywlock:
947 if not wlock and not mywlock:
948 mywlock = True
948 mywlock = True
949 try:
949 try:
950 wlock = self.wlock(wait=0)
950 wlock = self.wlock(wait=0)
951 except lock.LockException:
951 except lock.LockException:
952 pass
952 pass
953 if wlock:
953 if wlock:
954 self.dirstate.update([f], "n")
954 self.dirstate.update([f], "n")
955 else:
955 else:
956 # we are comparing working dir against non-parent
956 # we are comparing working dir against non-parent
957 # generate a pseudo-manifest for the working dir
957 # generate a pseudo-manifest for the working dir
958 # XXX: create it in dirstate.py ?
958 # XXX: create it in dirstate.py ?
959 mf2 = mfmatches(self.dirstate.parents()[0])
959 mf2 = mfmatches(self.dirstate.parents()[0])
960 is_exec = util.execfunc(self.root, mf2.execf)
960 is_exec = util.execfunc(self.root, mf2.execf)
961 is_link = util.linkfunc(self.root, mf2.linkf)
961 is_link = util.linkfunc(self.root, mf2.linkf)
962 for f in lookup + modified + added:
962 for f in lookup + modified + added:
963 mf2[f] = ""
963 mf2[f] = ""
964 mf2.set(f, is_exec(f), is_link(f))
964 mf2.set(f, is_exec(f), is_link(f))
965 for f in removed:
965 for f in removed:
966 if f in mf2:
966 if f in mf2:
967 del mf2[f]
967 del mf2[f]
968
968
969 if mywlock and wlock:
969 if mywlock and wlock:
970 wlock.release()
970 wlock.release()
971 else:
971 else:
972 # we are comparing two revisions
972 # we are comparing two revisions
973 mf2 = mfmatches(node2)
973 mf2 = mfmatches(node2)
974
974
975 if not compareworking:
975 if not compareworking:
976 # flush lists from dirstate before comparing manifests
976 # flush lists from dirstate before comparing manifests
977 modified, added, clean = [], [], []
977 modified, added, clean = [], [], []
978
978
979 # make sure to sort the files so we talk to the disk in a
979 # make sure to sort the files so we talk to the disk in a
980 # reasonable order
980 # reasonable order
981 mf2keys = mf2.keys()
981 mf2keys = mf2.keys()
982 mf2keys.sort()
982 mf2keys.sort()
983 getnode = lambda fn: mf1.get(fn, nullid)
983 getnode = lambda fn: mf1.get(fn, nullid)
984 for fn in mf2keys:
984 for fn in mf2keys:
985 if mf1.has_key(fn):
985 if mf1.has_key(fn):
986 if mf1.flags(fn) != mf2.flags(fn) or \
986 if mf1.flags(fn) != mf2.flags(fn) or \
987 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
987 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
988 fcmp(fn, getnode))):
988 fcmp(fn, getnode))):
989 modified.append(fn)
989 modified.append(fn)
990 elif list_clean:
990 elif list_clean:
991 clean.append(fn)
991 clean.append(fn)
992 del mf1[fn]
992 del mf1[fn]
993 else:
993 else:
994 added.append(fn)
994 added.append(fn)
995
995
996 removed = mf1.keys()
996 removed = mf1.keys()
997
997
998 # sort and return results:
998 # sort and return results:
999 for l in modified, added, removed, deleted, unknown, ignored, clean:
999 for l in modified, added, removed, deleted, unknown, ignored, clean:
1000 l.sort()
1000 l.sort()
1001 return (modified, added, removed, deleted, unknown, ignored, clean)
1001 return (modified, added, removed, deleted, unknown, ignored, clean)
1002
1002
1003 def add(self, list, wlock=None):
1003 def add(self, list, wlock=None):
1004 if not wlock:
1004 if not wlock:
1005 wlock = self.wlock()
1005 wlock = self.wlock()
1006 for f in list:
1006 for f in list:
1007 p = self.wjoin(f)
1007 p = self.wjoin(f)
1008 islink = os.path.islink(p)
1008 islink = os.path.islink(p)
1009 size = os.lstat(p).st_size
1010 if size > 10000000:
1011 self.ui.warn(_("%s: files over 10MB may cause memory and"
1012 " performance problems\n"
1013 "(use 'hg revert %s' to unadd the file)\n")
1014 % (f, f))
1009 if not islink and not os.path.exists(p):
1015 if not islink and not os.path.exists(p):
1010 self.ui.warn(_("%s does not exist!\n") % f)
1016 self.ui.warn(_("%s does not exist!\n") % f)
1011 elif not islink and not os.path.isfile(p):
1017 elif not islink and not os.path.isfile(p):
1012 self.ui.warn(_("%s not added: only files and symlinks "
1018 self.ui.warn(_("%s not added: only files and symlinks "
1013 "supported currently\n") % f)
1019 "supported currently\n") % f)
1014 elif self.dirstate.state(f) in 'an':
1020 elif self.dirstate.state(f) in 'an':
1015 self.ui.warn(_("%s already tracked!\n") % f)
1021 self.ui.warn(_("%s already tracked!\n") % f)
1016 else:
1022 else:
1017 self.dirstate.update([f], "a")
1023 self.dirstate.update([f], "a")
1018
1024
1019 def forget(self, list, wlock=None):
1025 def forget(self, list, wlock=None):
1020 if not wlock:
1026 if not wlock:
1021 wlock = self.wlock()
1027 wlock = self.wlock()
1022 for f in list:
1028 for f in list:
1023 if self.dirstate.state(f) not in 'ai':
1029 if self.dirstate.state(f) not in 'ai':
1024 self.ui.warn(_("%s not added!\n") % f)
1030 self.ui.warn(_("%s not added!\n") % f)
1025 else:
1031 else:
1026 self.dirstate.forget([f])
1032 self.dirstate.forget([f])
1027
1033
1028 def remove(self, list, unlink=False, wlock=None):
1034 def remove(self, list, unlink=False, wlock=None):
1029 if unlink:
1035 if unlink:
1030 for f in list:
1036 for f in list:
1031 try:
1037 try:
1032 util.unlink(self.wjoin(f))
1038 util.unlink(self.wjoin(f))
1033 except OSError, inst:
1039 except OSError, inst:
1034 if inst.errno != errno.ENOENT:
1040 if inst.errno != errno.ENOENT:
1035 raise
1041 raise
1036 if not wlock:
1042 if not wlock:
1037 wlock = self.wlock()
1043 wlock = self.wlock()
1038 for f in list:
1044 for f in list:
1039 if unlink and os.path.exists(self.wjoin(f)):
1045 if unlink and os.path.exists(self.wjoin(f)):
1040 self.ui.warn(_("%s still exists!\n") % f)
1046 self.ui.warn(_("%s still exists!\n") % f)
1041 elif self.dirstate.state(f) == 'a':
1047 elif self.dirstate.state(f) == 'a':
1042 self.dirstate.forget([f])
1048 self.dirstate.forget([f])
1043 elif f not in self.dirstate:
1049 elif f not in self.dirstate:
1044 self.ui.warn(_("%s not tracked!\n") % f)
1050 self.ui.warn(_("%s not tracked!\n") % f)
1045 else:
1051 else:
1046 self.dirstate.update([f], "r")
1052 self.dirstate.update([f], "r")
1047
1053
1048 def undelete(self, list, wlock=None):
1054 def undelete(self, list, wlock=None):
1049 p = self.dirstate.parents()[0]
1055 p = self.dirstate.parents()[0]
1050 mn = self.changelog.read(p)[0]
1056 mn = self.changelog.read(p)[0]
1051 m = self.manifest.read(mn)
1057 m = self.manifest.read(mn)
1052 if not wlock:
1058 if not wlock:
1053 wlock = self.wlock()
1059 wlock = self.wlock()
1054 for f in list:
1060 for f in list:
1055 if self.dirstate.state(f) not in "r":
1061 if self.dirstate.state(f) not in "r":
1056 self.ui.warn("%s not removed!\n" % f)
1062 self.ui.warn("%s not removed!\n" % f)
1057 else:
1063 else:
1058 t = self.file(f).read(m[f])
1064 t = self.file(f).read(m[f])
1059 self.wwrite(f, t, m.flags(f))
1065 self.wwrite(f, t, m.flags(f))
1060 self.dirstate.update([f], "n")
1066 self.dirstate.update([f], "n")
1061
1067
1062 def copy(self, source, dest, wlock=None):
1068 def copy(self, source, dest, wlock=None):
1063 p = self.wjoin(dest)
1069 p = self.wjoin(dest)
1064 if not (os.path.exists(p) or os.path.islink(p)):
1070 if not (os.path.exists(p) or os.path.islink(p)):
1065 self.ui.warn(_("%s does not exist!\n") % dest)
1071 self.ui.warn(_("%s does not exist!\n") % dest)
1066 elif not (os.path.isfile(p) or os.path.islink(p)):
1072 elif not (os.path.isfile(p) or os.path.islink(p)):
1067 self.ui.warn(_("copy failed: %s is not a file or a "
1073 self.ui.warn(_("copy failed: %s is not a file or a "
1068 "symbolic link\n") % dest)
1074 "symbolic link\n") % dest)
1069 else:
1075 else:
1070 if not wlock:
1076 if not wlock:
1071 wlock = self.wlock()
1077 wlock = self.wlock()
1072 if self.dirstate.state(dest) == '?':
1078 if self.dirstate.state(dest) == '?':
1073 self.dirstate.update([dest], "a")
1079 self.dirstate.update([dest], "a")
1074 self.dirstate.copy(source, dest)
1080 self.dirstate.copy(source, dest)
1075
1081
1076 def heads(self, start=None):
1082 def heads(self, start=None):
1077 heads = self.changelog.heads(start)
1083 heads = self.changelog.heads(start)
1078 # sort the output in rev descending order
1084 # sort the output in rev descending order
1079 heads = [(-self.changelog.rev(h), h) for h in heads]
1085 heads = [(-self.changelog.rev(h), h) for h in heads]
1080 heads.sort()
1086 heads.sort()
1081 return [n for (r, n) in heads]
1087 return [n for (r, n) in heads]
1082
1088
1083 def branches(self, nodes):
1089 def branches(self, nodes):
1084 if not nodes:
1090 if not nodes:
1085 nodes = [self.changelog.tip()]
1091 nodes = [self.changelog.tip()]
1086 b = []
1092 b = []
1087 for n in nodes:
1093 for n in nodes:
1088 t = n
1094 t = n
1089 while 1:
1095 while 1:
1090 p = self.changelog.parents(n)
1096 p = self.changelog.parents(n)
1091 if p[1] != nullid or p[0] == nullid:
1097 if p[1] != nullid or p[0] == nullid:
1092 b.append((t, n, p[0], p[1]))
1098 b.append((t, n, p[0], p[1]))
1093 break
1099 break
1094 n = p[0]
1100 n = p[0]
1095 return b
1101 return b
1096
1102
1097 def between(self, pairs):
1103 def between(self, pairs):
1098 r = []
1104 r = []
1099
1105
1100 for top, bottom in pairs:
1106 for top, bottom in pairs:
1101 n, l, i = top, [], 0
1107 n, l, i = top, [], 0
1102 f = 1
1108 f = 1
1103
1109
1104 while n != bottom:
1110 while n != bottom:
1105 p = self.changelog.parents(n)[0]
1111 p = self.changelog.parents(n)[0]
1106 if i == f:
1112 if i == f:
1107 l.append(n)
1113 l.append(n)
1108 f = f * 2
1114 f = f * 2
1109 n = p
1115 n = p
1110 i += 1
1116 i += 1
1111
1117
1112 r.append(l)
1118 r.append(l)
1113
1119
1114 return r
1120 return r
1115
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # membership in m == "node is known locally"
        m = self.changelog.nodemap
        # branch ranges scheduled for binary search (phase two)
        search = []
        # roots of the missing sets: this is what we return
        fetch = {}
        # branch heads already examined
        seen = {}
        # full branch tuples already scheduled for searching
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repo: everything remote has is missing here
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # partition the remote heads into locally-unknown and known
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # nodes we have already asked the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            # parents to query in the next round, batched below
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # branch root is unknown but both its parents
                            # are known: the root is a missing-set root
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue any unknown, not-yet-requested parents
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                    seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # exponentially-spaced samples between head n[0] and base n[1]
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is the earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the sub-range (p unknown, i known)
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        # only the null node in common means the repos share no history
        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1262
1257 def findoutgoing(self, remote, base=None, heads=None, force=False):
1263 def findoutgoing(self, remote, base=None, heads=None, force=False):
1258 """Return list of nodes that are roots of subsets not in remote
1264 """Return list of nodes that are roots of subsets not in remote
1259
1265
1260 If base dict is specified, assume that these nodes and their parents
1266 If base dict is specified, assume that these nodes and their parents
1261 exist on the remote side.
1267 exist on the remote side.
1262 If a list of heads is specified, return only nodes which are heads
1268 If a list of heads is specified, return only nodes which are heads
1263 or ancestors of these heads, and return a second element which
1269 or ancestors of these heads, and return a second element which
1264 contains all remote heads which get new children.
1270 contains all remote heads which get new children.
1265 """
1271 """
1266 if base == None:
1272 if base == None:
1267 base = {}
1273 base = {}
1268 self.findincoming(remote, base, heads, force=force)
1274 self.findincoming(remote, base, heads, force=force)
1269
1275
1270 self.ui.debug(_("common changesets up to ")
1276 self.ui.debug(_("common changesets up to ")
1271 + " ".join(map(short, base.keys())) + "\n")
1277 + " ".join(map(short, base.keys())) + "\n")
1272
1278
1273 remain = dict.fromkeys(self.changelog.nodemap)
1279 remain = dict.fromkeys(self.changelog.nodemap)
1274
1280
1275 # prune everything remote has from the tree
1281 # prune everything remote has from the tree
1276 del remain[nullid]
1282 del remain[nullid]
1277 remove = base.keys()
1283 remove = base.keys()
1278 while remove:
1284 while remove:
1279 n = remove.pop(0)
1285 n = remove.pop(0)
1280 if n in remain:
1286 if n in remain:
1281 del remain[n]
1287 del remain[n]
1282 for p in self.changelog.parents(n):
1288 for p in self.changelog.parents(n):
1283 remove.append(p)
1289 remove.append(p)
1284
1290
1285 # find every node whose parents have been pruned
1291 # find every node whose parents have been pruned
1286 subset = []
1292 subset = []
1287 # find every remote head that will get new children
1293 # find every remote head that will get new children
1288 updated_heads = {}
1294 updated_heads = {}
1289 for n in remain:
1295 for n in remain:
1290 p1, p2 = self.changelog.parents(n)
1296 p1, p2 = self.changelog.parents(n)
1291 if p1 not in remain and p2 not in remain:
1297 if p1 not in remain and p2 not in remain:
1292 subset.append(n)
1298 subset.append(n)
1293 if heads:
1299 if heads:
1294 if p1 in heads:
1300 if p1 in heads:
1295 updated_heads[p1] = True
1301 updated_heads[p1] = True
1296 if p2 in heads:
1302 if p2 in heads:
1297 updated_heads[p2] = True
1303 updated_heads[p2] = True
1298
1304
1299 # this is the set of all roots we have to push
1305 # this is the set of all roots we have to push
1300 if heads:
1306 if heads:
1301 return subset, updated_heads.keys()
1307 return subset, updated_heads.keys()
1302 else:
1308 else:
1303 return subset
1309 return subset
1304
1310
1305 def pull(self, remote, heads=None, force=False, lock=None):
1311 def pull(self, remote, heads=None, force=False, lock=None):
1306 mylock = False
1312 mylock = False
1307 if not lock:
1313 if not lock:
1308 lock = self.lock()
1314 lock = self.lock()
1309 mylock = True
1315 mylock = True
1310
1316
1311 try:
1317 try:
1312 fetch = self.findincoming(remote, force=force)
1318 fetch = self.findincoming(remote, force=force)
1313 if fetch == [nullid]:
1319 if fetch == [nullid]:
1314 self.ui.status(_("requesting all changes\n"))
1320 self.ui.status(_("requesting all changes\n"))
1315
1321
1316 if not fetch:
1322 if not fetch:
1317 self.ui.status(_("no changes found\n"))
1323 self.ui.status(_("no changes found\n"))
1318 return 0
1324 return 0
1319
1325
1320 if heads is None:
1326 if heads is None:
1321 cg = remote.changegroup(fetch, 'pull')
1327 cg = remote.changegroup(fetch, 'pull')
1322 else:
1328 else:
1323 if 'changegroupsubset' not in remote.capabilities:
1329 if 'changegroupsubset' not in remote.capabilities:
1324 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1330 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1325 cg = remote.changegroupsubset(fetch, heads, 'pull')
1331 cg = remote.changegroupsubset(fetch, heads, 'pull')
1326 return self.addchangegroup(cg, 'pull', remote.url())
1332 return self.addchangegroup(cg, 'pull', remote.url())
1327 finally:
1333 finally:
1328 if mylock:
1334 if mylock:
1329 lock.release()
1335 lock.release()
1330
1336
1331 def push(self, remote, force=False, revs=None):
1337 def push(self, remote, force=False, revs=None):
1332 # there are two ways to push to remote repo:
1338 # there are two ways to push to remote repo:
1333 #
1339 #
1334 # addchangegroup assumes local user can lock remote
1340 # addchangegroup assumes local user can lock remote
1335 # repo (local filesystem, old ssh servers).
1341 # repo (local filesystem, old ssh servers).
1336 #
1342 #
1337 # unbundle assumes local user cannot lock remote repo (new ssh
1343 # unbundle assumes local user cannot lock remote repo (new ssh
1338 # servers, http servers).
1344 # servers, http servers).
1339
1345
1340 if remote.capable('unbundle'):
1346 if remote.capable('unbundle'):
1341 return self.push_unbundle(remote, force, revs)
1347 return self.push_unbundle(remote, force, revs)
1342 return self.push_addchangegroup(remote, force, revs)
1348 return self.push_addchangegroup(remote, force, revs)
1343
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns a (changegroup, remote_heads) pair on success, or
        (None, error_code) when there is nothing to push or when the
        push would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # inc: roots of changesets the remote has that we don't;
        # base is filled in with the common nodes as a side effect
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the given revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: any push is safe
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # full push with more local heads than remote heads
                warn = 1
            else:
                # simulate the remote head set after the push
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head only
                        # if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # remote head unknown locally: it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push over unseen remote changes: warn but proceed
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1405
1400 def push_addchangegroup(self, remote, force, revs):
1406 def push_addchangegroup(self, remote, force, revs):
1401 lock = remote.lock()
1407 lock = remote.lock()
1402
1408
1403 ret = self.prepush(remote, force, revs)
1409 ret = self.prepush(remote, force, revs)
1404 if ret[0] is not None:
1410 if ret[0] is not None:
1405 cg, remote_heads = ret
1411 cg, remote_heads = ret
1406 return remote.addchangegroup(cg, 'push', self.url())
1412 return remote.addchangegroup(cg, 'push', self.url())
1407 return ret[1]
1413 return ret[1]
1408
1414
1409 def push_unbundle(self, remote, force, revs):
1415 def push_unbundle(self, remote, force, revs):
1410 # local repo finds heads on server, finds out what revs it
1416 # local repo finds heads on server, finds out what revs it
1411 # must push. once revs transferred, if server finds it has
1417 # must push. once revs transferred, if server finds it has
1412 # different heads (someone else won commit/push race), server
1418 # different heads (someone else won commit/push race), server
1413 # aborts.
1419 # aborts.
1414
1420
1415 ret = self.prepush(remote, force, revs)
1421 ret = self.prepush(remote, force, revs)
1416 if ret[0] is not None:
1422 if ret[0] is not None:
1417 cg, remote_heads = ret
1423 cg, remote_heads = ret
1418 if force: remote_heads = ['force']
1424 if force: remote_heads = ['force']
1419 return remote.unbundle(cg, remote_heads, 'push')
1425 return remote.unbundle(cg, remote_heads, 'push')
1420 return ret[1]
1426 return ret[1]
1421
1427
1422 def changegroupinfo(self, nodes):
1428 def changegroupinfo(self, nodes):
1423 self.ui.note(_("%d changesets found\n") % len(nodes))
1429 self.ui.note(_("%d changesets found\n") % len(nodes))
1424 if self.ui.debugflag:
1430 if self.ui.debugflag:
1425 self.ui.debug(_("List of changesets:\n"))
1431 self.ui.debug(_("List of changesets:\n"))
1426 for node in nodes:
1432 for node in nodes:
1427 self.ui.debug("%s\n" % hex(node))
1433 self.ui.debug("%s\n" % hex(node))
1428
1434
1429 def changegroupsubset(self, bases, heads, source):
1435 def changegroupsubset(self, bases, heads, source):
1430 """This function generates a changegroup consisting of all the nodes
1436 """This function generates a changegroup consisting of all the nodes
1431 that are descendents of any of the bases, and ancestors of any of
1437 that are descendents of any of the bases, and ancestors of any of
1432 the heads.
1438 the heads.
1433
1439
1434 It is fairly complex as determining which filenodes and which
1440 It is fairly complex as determining which filenodes and which
1435 manifest nodes need to be included for the changeset to be complete
1441 manifest nodes need to be included for the changeset to be complete
1436 is non-trivial.
1442 is non-trivial.
1437
1443
1438 Another wrinkle is doing the reverse, figuring out which changeset in
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1439 the changegroup a particular filenode or manifestnode belongs to."""
1445 the changegroup a particular filenode or manifestnode belongs to."""
1440
1446
1441 self.hook('preoutgoing', throw=True, source=source)
1447 self.hook('preoutgoing', throw=True, source=source)
1442
1448
1443 # Set up some initial variables
1449 # Set up some initial variables
1444 # Make it easy to refer to self.changelog
1450 # Make it easy to refer to self.changelog
1445 cl = self.changelog
1451 cl = self.changelog
1446 # msng is short for missing - compute the list of changesets in this
1452 # msng is short for missing - compute the list of changesets in this
1447 # changegroup.
1453 # changegroup.
1448 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1454 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1449 self.changegroupinfo(msng_cl_lst)
1455 self.changegroupinfo(msng_cl_lst)
1450 # Some bases may turn out to be superfluous, and some heads may be
1456 # Some bases may turn out to be superfluous, and some heads may be
1451 # too. nodesbetween will return the minimal set of bases and heads
1457 # too. nodesbetween will return the minimal set of bases and heads
1452 # necessary to re-create the changegroup.
1458 # necessary to re-create the changegroup.
1453
1459
1454 # Known heads are the list of heads that it is assumed the recipient
1460 # Known heads are the list of heads that it is assumed the recipient
1455 # of this changegroup will know about.
1461 # of this changegroup will know about.
1456 knownheads = {}
1462 knownheads = {}
1457 # We assume that all parents of bases are known heads.
1463 # We assume that all parents of bases are known heads.
1458 for n in bases:
1464 for n in bases:
1459 for p in cl.parents(n):
1465 for p in cl.parents(n):
1460 if p != nullid:
1466 if p != nullid:
1461 knownheads[p] = 1
1467 knownheads[p] = 1
1462 knownheads = knownheads.keys()
1468 knownheads = knownheads.keys()
1463 if knownheads:
1469 if knownheads:
1464 # Now that we know what heads are known, we can compute which
1470 # Now that we know what heads are known, we can compute which
1465 # changesets are known. The recipient must know about all
1471 # changesets are known. The recipient must know about all
1466 # changesets required to reach the known heads from the null
1472 # changesets required to reach the known heads from the null
1467 # changeset.
1473 # changeset.
1468 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1474 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1469 junk = None
1475 junk = None
1470 # Transform the list into an ersatz set.
1476 # Transform the list into an ersatz set.
1471 has_cl_set = dict.fromkeys(has_cl_set)
1477 has_cl_set = dict.fromkeys(has_cl_set)
1472 else:
1478 else:
1473 # If there were no known heads, the recipient cannot be assumed to
1479 # If there were no known heads, the recipient cannot be assumed to
1474 # know about any changesets.
1480 # know about any changesets.
1475 has_cl_set = {}
1481 has_cl_set = {}
1476
1482
1477 # Make it easy to refer to self.manifest
1483 # Make it easy to refer to self.manifest
1478 mnfst = self.manifest
1484 mnfst = self.manifest
1479 # We don't know which manifests are missing yet
1485 # We don't know which manifests are missing yet
1480 msng_mnfst_set = {}
1486 msng_mnfst_set = {}
1481 # Nor do we know which filenodes are missing.
1487 # Nor do we know which filenodes are missing.
1482 msng_filenode_set = {}
1488 msng_filenode_set = {}
1483
1489
1484 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1490 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1485 junk = None
1491 junk = None
1486
1492
1487 # A changeset always belongs to itself, so the changenode lookup
1493 # A changeset always belongs to itself, so the changenode lookup
1488 # function for a changenode is identity.
1494 # function for a changenode is identity.
1489 def identity(x):
1495 def identity(x):
1490 return x
1496 return x
1491
1497
1492 # A function generating function. Sets up an environment for the
1498 # A function generating function. Sets up an environment for the
1493 # inner function.
1499 # inner function.
1494 def cmp_by_rev_func(revlog):
1500 def cmp_by_rev_func(revlog):
1495 # Compare two nodes by their revision number in the environment's
1501 # Compare two nodes by their revision number in the environment's
1496 # revision history. Since the revision number both represents the
1502 # revision history. Since the revision number both represents the
1497 # most efficient order to read the nodes in, and represents a
1503 # most efficient order to read the nodes in, and represents a
1498 # topological sorting of the nodes, this function is often useful.
1504 # topological sorting of the nodes, this function is often useful.
1499 def cmp_by_rev(a, b):
1505 def cmp_by_rev(a, b):
1500 return cmp(revlog.rev(a), revlog.rev(b))
1506 return cmp(revlog.rev(a), revlog.rev(b))
1501 return cmp_by_rev
1507 return cmp_by_rev
1502
1508
1503 # If we determine that a particular file or manifest node must be a
1509 # If we determine that a particular file or manifest node must be a
1504 # node that the recipient of the changegroup will already have, we can
1510 # node that the recipient of the changegroup will already have, we can
1505 # also assume the recipient will have all the parents. This function
1511 # also assume the recipient will have all the parents. This function
1506 # prunes them from the set of missing nodes.
1512 # prunes them from the set of missing nodes.
1507 def prune_parents(revlog, hasset, msngset):
1513 def prune_parents(revlog, hasset, msngset):
1508 haslst = hasset.keys()
1514 haslst = hasset.keys()
1509 haslst.sort(cmp_by_rev_func(revlog))
1515 haslst.sort(cmp_by_rev_func(revlog))
1510 for node in haslst:
1516 for node in haslst:
1511 parentlst = [p for p in revlog.parents(node) if p != nullid]
1517 parentlst = [p for p in revlog.parents(node) if p != nullid]
1512 while parentlst:
1518 while parentlst:
1513 n = parentlst.pop()
1519 n = parentlst.pop()
1514 if n not in hasset:
1520 if n not in hasset:
1515 hasset[n] = 1
1521 hasset[n] = 1
1516 p = [p for p in revlog.parents(n) if p != nullid]
1522 p = [p for p in revlog.parents(n) if p != nullid]
1517 parentlst.extend(p)
1523 parentlst.extend(p)
1518 for n in hasset:
1524 for n in hasset:
1519 msngset.pop(n, None)
1525 msngset.pop(n, None)
1520
1526
1521 # This is a function generating function used to set up an environment
1527 # This is a function generating function used to set up an environment
1522 # for the inner function to execute in.
1528 # for the inner function to execute in.
1523 def manifest_and_file_collector(changedfileset):
1529 def manifest_and_file_collector(changedfileset):
1524 # This is an information gathering function that gathers
1530 # This is an information gathering function that gathers
1525 # information from each changeset node that goes out as part of
1531 # information from each changeset node that goes out as part of
1526 # the changegroup. The information gathered is a list of which
1532 # the changegroup. The information gathered is a list of which
1527 # manifest nodes are potentially required (the recipient may
1533 # manifest nodes are potentially required (the recipient may
1528 # already have them) and total list of all files which were
1534 # already have them) and total list of all files which were
1529 # changed in any changeset in the changegroup.
1535 # changed in any changeset in the changegroup.
1530 #
1536 #
1531 # We also remember the first changenode we saw any manifest
1537 # We also remember the first changenode we saw any manifest
1532 # referenced by so we can later determine which changenode 'owns'
1538 # referenced by so we can later determine which changenode 'owns'
1533 # the manifest.
1539 # the manifest.
1534 def collect_manifests_and_files(clnode):
1540 def collect_manifests_and_files(clnode):
1535 c = cl.read(clnode)
1541 c = cl.read(clnode)
1536 for f in c[3]:
1542 for f in c[3]:
1537 # This is to make sure we only have one instance of each
1543 # This is to make sure we only have one instance of each
1538 # filename string for each filename.
1544 # filename string for each filename.
1539 changedfileset.setdefault(f, f)
1545 changedfileset.setdefault(f, f)
1540 msng_mnfst_set.setdefault(c[0], clnode)
1546 msng_mnfst_set.setdefault(c[0], clnode)
1541 return collect_manifests_and_files
1547 return collect_manifests_and_files
1542
1548
1543 # Figure out which manifest nodes (of the ones we think might be part
1549 # Figure out which manifest nodes (of the ones we think might be part
1544 # of the changegroup) the recipient must know about and remove them
1550 # of the changegroup) the recipient must know about and remove them
1545 # from the changegroup.
1551 # from the changegroup.
1546 def prune_manifests():
1552 def prune_manifests():
1547 has_mnfst_set = {}
1553 has_mnfst_set = {}
1548 for n in msng_mnfst_set:
1554 for n in msng_mnfst_set:
1549 # If a 'missing' manifest thinks it belongs to a changenode
1555 # If a 'missing' manifest thinks it belongs to a changenode
1550 # the recipient is assumed to have, obviously the recipient
1556 # the recipient is assumed to have, obviously the recipient
1551 # must have that manifest.
1557 # must have that manifest.
1552 linknode = cl.node(mnfst.linkrev(n))
1558 linknode = cl.node(mnfst.linkrev(n))
1553 if linknode in has_cl_set:
1559 if linknode in has_cl_set:
1554 has_mnfst_set[n] = 1
1560 has_mnfst_set[n] = 1
1555 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1561 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1556
1562
1557 # Use the information collected in collect_manifests_and_files to say
1563 # Use the information collected in collect_manifests_and_files to say
1558 # which changenode any manifestnode belongs to.
1564 # which changenode any manifestnode belongs to.
1559 def lookup_manifest_link(mnfstnode):
1565 def lookup_manifest_link(mnfstnode):
1560 return msng_mnfst_set[mnfstnode]
1566 return msng_mnfst_set[mnfstnode]
1561
1567
1562 # A function generating function that sets up the initial environment
1568 # A function generating function that sets up the initial environment
1563 # the inner function.
1569 # the inner function.
1564 def filenode_collector(changedfiles):
1570 def filenode_collector(changedfiles):
1565 next_rev = [0]
1571 next_rev = [0]
1566 # This gathers information from each manifestnode included in the
1572 # This gathers information from each manifestnode included in the
1567 # changegroup about which filenodes the manifest node references
1573 # changegroup about which filenodes the manifest node references
1568 # so we can include those in the changegroup too.
1574 # so we can include those in the changegroup too.
1569 #
1575 #
1570 # It also remembers which changenode each filenode belongs to. It
1576 # It also remembers which changenode each filenode belongs to. It
1571 # does this by assuming the a filenode belongs to the changenode
1577 # does this by assuming the a filenode belongs to the changenode
1572 # the first manifest that references it belongs to.
1578 # the first manifest that references it belongs to.
1573 def collect_msng_filenodes(mnfstnode):
1579 def collect_msng_filenodes(mnfstnode):
1574 r = mnfst.rev(mnfstnode)
1580 r = mnfst.rev(mnfstnode)
1575 if r == next_rev[0]:
1581 if r == next_rev[0]:
1576 # If the last rev we looked at was the one just previous,
1582 # If the last rev we looked at was the one just previous,
1577 # we only need to see a diff.
1583 # we only need to see a diff.
1578 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1584 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1579 # For each line in the delta
1585 # For each line in the delta
1580 for dline in delta.splitlines():
1586 for dline in delta.splitlines():
1581 # get the filename and filenode for that line
1587 # get the filename and filenode for that line
1582 f, fnode = dline.split('\0')
1588 f, fnode = dline.split('\0')
1583 fnode = bin(fnode[:40])
1589 fnode = bin(fnode[:40])
1584 f = changedfiles.get(f, None)
1590 f = changedfiles.get(f, None)
1585 # And if the file is in the list of files we care
1591 # And if the file is in the list of files we care
1586 # about.
1592 # about.
1587 if f is not None:
1593 if f is not None:
1588 # Get the changenode this manifest belongs to
1594 # Get the changenode this manifest belongs to
1589 clnode = msng_mnfst_set[mnfstnode]
1595 clnode = msng_mnfst_set[mnfstnode]
1590 # Create the set of filenodes for the file if
1596 # Create the set of filenodes for the file if
1591 # there isn't one already.
1597 # there isn't one already.
1592 ndset = msng_filenode_set.setdefault(f, {})
1598 ndset = msng_filenode_set.setdefault(f, {})
1593 # And set the filenode's changelog node to the
1599 # And set the filenode's changelog node to the
1594 # manifest's if it hasn't been set already.
1600 # manifest's if it hasn't been set already.
1595 ndset.setdefault(fnode, clnode)
1601 ndset.setdefault(fnode, clnode)
1596 else:
1602 else:
1597 # Otherwise we need a full manifest.
1603 # Otherwise we need a full manifest.
1598 m = mnfst.read(mnfstnode)
1604 m = mnfst.read(mnfstnode)
1599 # For every file in we care about.
1605 # For every file in we care about.
1600 for f in changedfiles:
1606 for f in changedfiles:
1601 fnode = m.get(f, None)
1607 fnode = m.get(f, None)
1602 # If it's in the manifest
1608 # If it's in the manifest
1603 if fnode is not None:
1609 if fnode is not None:
1604 # See comments above.
1610 # See comments above.
1605 clnode = msng_mnfst_set[mnfstnode]
1611 clnode = msng_mnfst_set[mnfstnode]
1606 ndset = msng_filenode_set.setdefault(f, {})
1612 ndset = msng_filenode_set.setdefault(f, {})
1607 ndset.setdefault(fnode, clnode)
1613 ndset.setdefault(fnode, clnode)
1608 # Remember the revision we hope to see next.
1614 # Remember the revision we hope to see next.
1609 next_rev[0] = r + 1
1615 next_rev[0] = r + 1
1610 return collect_msng_filenodes
1616 return collect_msng_filenodes
1611
1617
1612 # We have a list of filenodes we think we need for a file, lets remove
1618 # We have a list of filenodes we think we need for a file, lets remove
1613 # all those we now the recipient must have.
1619 # all those we now the recipient must have.
1614 def prune_filenodes(f, filerevlog):
1620 def prune_filenodes(f, filerevlog):
1615 msngset = msng_filenode_set[f]
1621 msngset = msng_filenode_set[f]
1616 hasset = {}
1622 hasset = {}
1617 # If a 'missing' filenode thinks it belongs to a changenode we
1623 # If a 'missing' filenode thinks it belongs to a changenode we
1618 # assume the recipient must have, then the recipient must have
1624 # assume the recipient must have, then the recipient must have
1619 # that filenode.
1625 # that filenode.
1620 for n in msngset:
1626 for n in msngset:
1621 clnode = cl.node(filerevlog.linkrev(n))
1627 clnode = cl.node(filerevlog.linkrev(n))
1622 if clnode in has_cl_set:
1628 if clnode in has_cl_set:
1623 hasset[n] = 1
1629 hasset[n] = 1
1624 prune_parents(filerevlog, hasset, msngset)
1630 prune_parents(filerevlog, hasset, msngset)
1625
1631
1626 # A function generator function that sets up the a context for the
1632 # A function generator function that sets up the a context for the
1627 # inner function.
1633 # inner function.
1628 def lookup_filenode_link_func(fname):
1634 def lookup_filenode_link_func(fname):
1629 msngset = msng_filenode_set[fname]
1635 msngset = msng_filenode_set[fname]
1630 # Lookup the changenode the filenode belongs to.
1636 # Lookup the changenode the filenode belongs to.
1631 def lookup_filenode_link(fnode):
1637 def lookup_filenode_link(fnode):
1632 return msngset[fnode]
1638 return msngset[fnode]
1633 return lookup_filenode_link
1639 return lookup_filenode_link
1634
1640
1635 # Now that we have all theses utility functions to help out and
1641 # Now that we have all theses utility functions to help out and
1636 # logically divide up the task, generate the group.
1642 # logically divide up the task, generate the group.
1637 def gengroup():
1643 def gengroup():
1638 # The set of changed files starts empty.
1644 # The set of changed files starts empty.
1639 changedfiles = {}
1645 changedfiles = {}
1640 # Create a changenode group generator that will call our functions
1646 # Create a changenode group generator that will call our functions
1641 # back to lookup the owning changenode and collect information.
1647 # back to lookup the owning changenode and collect information.
1642 group = cl.group(msng_cl_lst, identity,
1648 group = cl.group(msng_cl_lst, identity,
1643 manifest_and_file_collector(changedfiles))
1649 manifest_and_file_collector(changedfiles))
1644 for chnk in group:
1650 for chnk in group:
1645 yield chnk
1651 yield chnk
1646
1652
1647 # The list of manifests has been collected by the generator
1653 # The list of manifests has been collected by the generator
1648 # calling our functions back.
1654 # calling our functions back.
1649 prune_manifests()
1655 prune_manifests()
1650 msng_mnfst_lst = msng_mnfst_set.keys()
1656 msng_mnfst_lst = msng_mnfst_set.keys()
1651 # Sort the manifestnodes by revision number.
1657 # Sort the manifestnodes by revision number.
1652 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1658 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1653 # Create a generator for the manifestnodes that calls our lookup
1659 # Create a generator for the manifestnodes that calls our lookup
1654 # and data collection functions back.
1660 # and data collection functions back.
1655 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1661 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1656 filenode_collector(changedfiles))
1662 filenode_collector(changedfiles))
1657 for chnk in group:
1663 for chnk in group:
1658 yield chnk
1664 yield chnk
1659
1665
1660 # These are no longer needed, dereference and toss the memory for
1666 # These are no longer needed, dereference and toss the memory for
1661 # them.
1667 # them.
1662 msng_mnfst_lst = None
1668 msng_mnfst_lst = None
1663 msng_mnfst_set.clear()
1669 msng_mnfst_set.clear()
1664
1670
1665 changedfiles = changedfiles.keys()
1671 changedfiles = changedfiles.keys()
1666 changedfiles.sort()
1672 changedfiles.sort()
1667 # Go through all our files in order sorted by name.
1673 # Go through all our files in order sorted by name.
1668 for fname in changedfiles:
1674 for fname in changedfiles:
1669 filerevlog = self.file(fname)
1675 filerevlog = self.file(fname)
1670 # Toss out the filenodes that the recipient isn't really
1676 # Toss out the filenodes that the recipient isn't really
1671 # missing.
1677 # missing.
1672 if msng_filenode_set.has_key(fname):
1678 if msng_filenode_set.has_key(fname):
1673 prune_filenodes(fname, filerevlog)
1679 prune_filenodes(fname, filerevlog)
1674 msng_filenode_lst = msng_filenode_set[fname].keys()
1680 msng_filenode_lst = msng_filenode_set[fname].keys()
1675 else:
1681 else:
1676 msng_filenode_lst = []
1682 msng_filenode_lst = []
1677 # If any filenodes are left, generate the group for them,
1683 # If any filenodes are left, generate the group for them,
1678 # otherwise don't bother.
1684 # otherwise don't bother.
1679 if len(msng_filenode_lst) > 0:
1685 if len(msng_filenode_lst) > 0:
1680 yield changegroup.genchunk(fname)
1686 yield changegroup.genchunk(fname)
1681 # Sort the filenodes by their revision #
1687 # Sort the filenodes by their revision #
1682 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1688 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1683 # Create a group generator and only pass in a changenode
1689 # Create a group generator and only pass in a changenode
1684 # lookup function as we need to collect no information
1690 # lookup function as we need to collect no information
1685 # from filenodes.
1691 # from filenodes.
1686 group = filerevlog.group(msng_filenode_lst,
1692 group = filerevlog.group(msng_filenode_lst,
1687 lookup_filenode_link_func(fname))
1693 lookup_filenode_link_func(fname))
1688 for chnk in group:
1694 for chnk in group:
1689 yield chnk
1695 yield chnk
1690 if msng_filenode_set.has_key(fname):
1696 if msng_filenode_set.has_key(fname):
1691 # Don't need this anymore, toss it to free memory.
1697 # Don't need this anymore, toss it to free memory.
1692 del msng_filenode_set[fname]
1698 del msng_filenode_set[fname]
1693 # Signal that no more groups are left.
1699 # Signal that no more groups are left.
1694 yield changegroup.closechunk()
1700 yield changegroup.closechunk()
1695
1701
1696 if msng_cl_lst:
1702 if msng_cl_lst:
1697 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1703 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1698
1704
1699 return util.chunkbuffer(gengroup())
1705 return util.chunkbuffer(gengroup())
1700
1706
1701 def changegroup(self, basenodes, source):
1707 def changegroup(self, basenodes, source):
1702 """Generate a changegroup of all nodes that we have that a recipient
1708 """Generate a changegroup of all nodes that we have that a recipient
1703 doesn't.
1709 doesn't.
1704
1710
1705 This is much easier than the previous function as we can assume that
1711 This is much easier than the previous function as we can assume that
1706 the recipient has any changenode we aren't sending them."""
1712 the recipient has any changenode we aren't sending them."""
1707
1713
1708 self.hook('preoutgoing', throw=True, source=source)
1714 self.hook('preoutgoing', throw=True, source=source)
1709
1715
1710 cl = self.changelog
1716 cl = self.changelog
1711 nodes = cl.nodesbetween(basenodes, None)[0]
1717 nodes = cl.nodesbetween(basenodes, None)[0]
1712 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1718 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1713 self.changegroupinfo(nodes)
1719 self.changegroupinfo(nodes)
1714
1720
1715 def identity(x):
1721 def identity(x):
1716 return x
1722 return x
1717
1723
1718 def gennodelst(revlog):
1724 def gennodelst(revlog):
1719 for r in xrange(0, revlog.count()):
1725 for r in xrange(0, revlog.count()):
1720 n = revlog.node(r)
1726 n = revlog.node(r)
1721 if revlog.linkrev(n) in revset:
1727 if revlog.linkrev(n) in revset:
1722 yield n
1728 yield n
1723
1729
1724 def changed_file_collector(changedfileset):
1730 def changed_file_collector(changedfileset):
1725 def collect_changed_files(clnode):
1731 def collect_changed_files(clnode):
1726 c = cl.read(clnode)
1732 c = cl.read(clnode)
1727 for fname in c[3]:
1733 for fname in c[3]:
1728 changedfileset[fname] = 1
1734 changedfileset[fname] = 1
1729 return collect_changed_files
1735 return collect_changed_files
1730
1736
1731 def lookuprevlink_func(revlog):
1737 def lookuprevlink_func(revlog):
1732 def lookuprevlink(n):
1738 def lookuprevlink(n):
1733 return cl.node(revlog.linkrev(n))
1739 return cl.node(revlog.linkrev(n))
1734 return lookuprevlink
1740 return lookuprevlink
1735
1741
1736 def gengroup():
1742 def gengroup():
1737 # construct a list of all changed files
1743 # construct a list of all changed files
1738 changedfiles = {}
1744 changedfiles = {}
1739
1745
1740 for chnk in cl.group(nodes, identity,
1746 for chnk in cl.group(nodes, identity,
1741 changed_file_collector(changedfiles)):
1747 changed_file_collector(changedfiles)):
1742 yield chnk
1748 yield chnk
1743 changedfiles = changedfiles.keys()
1749 changedfiles = changedfiles.keys()
1744 changedfiles.sort()
1750 changedfiles.sort()
1745
1751
1746 mnfst = self.manifest
1752 mnfst = self.manifest
1747 nodeiter = gennodelst(mnfst)
1753 nodeiter = gennodelst(mnfst)
1748 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1754 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1749 yield chnk
1755 yield chnk
1750
1756
1751 for fname in changedfiles:
1757 for fname in changedfiles:
1752 filerevlog = self.file(fname)
1758 filerevlog = self.file(fname)
1753 nodeiter = gennodelst(filerevlog)
1759 nodeiter = gennodelst(filerevlog)
1754 nodeiter = list(nodeiter)
1760 nodeiter = list(nodeiter)
1755 if nodeiter:
1761 if nodeiter:
1756 yield changegroup.genchunk(fname)
1762 yield changegroup.genchunk(fname)
1757 lookup = lookuprevlink_func(filerevlog)
1763 lookup = lookuprevlink_func(filerevlog)
1758 for chnk in filerevlog.group(nodeiter, lookup):
1764 for chnk in filerevlog.group(nodeiter, lookup):
1759 yield chnk
1765 yield chnk
1760
1766
1761 yield changegroup.closechunk()
1767 yield changegroup.closechunk()
1762
1768
1763 if nodes:
1769 if nodes:
1764 self.hook('outgoing', node=hex(nodes[0]), source=source)
1770 self.hook('outgoing', node=hex(nodes[0]), source=source)
1765
1771
1766 return util.chunkbuffer(gengroup())
1772 return util.chunkbuffer(gengroup())
1767
1773
1768 def addchangegroup(self, source, srctype, url):
1774 def addchangegroup(self, source, srctype, url):
1769 """add changegroup to repo.
1775 """add changegroup to repo.
1770
1776
1771 return values:
1777 return values:
1772 - nothing changed or no source: 0
1778 - nothing changed or no source: 0
1773 - more heads than before: 1+added heads (2..n)
1779 - more heads than before: 1+added heads (2..n)
1774 - less heads than before: -1-removed heads (-2..-n)
1780 - less heads than before: -1-removed heads (-2..-n)
1775 - number of heads stays the same: 1
1781 - number of heads stays the same: 1
1776 """
1782 """
1777 def csmap(x):
1783 def csmap(x):
1778 self.ui.debug(_("add changeset %s\n") % short(x))
1784 self.ui.debug(_("add changeset %s\n") % short(x))
1779 return cl.count()
1785 return cl.count()
1780
1786
1781 def revmap(x):
1787 def revmap(x):
1782 return cl.rev(x)
1788 return cl.rev(x)
1783
1789
1784 if not source:
1790 if not source:
1785 return 0
1791 return 0
1786
1792
1787 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1793 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1788
1794
1789 changesets = files = revisions = 0
1795 changesets = files = revisions = 0
1790
1796
1791 tr = self.transaction()
1797 tr = self.transaction()
1792
1798
1793 # write changelog data to temp files so concurrent readers will not see
1799 # write changelog data to temp files so concurrent readers will not see
1794 # inconsistent view
1800 # inconsistent view
1795 cl = self.changelog
1801 cl = self.changelog
1796 cl.delayupdate()
1802 cl.delayupdate()
1797 oldheads = len(cl.heads())
1803 oldheads = len(cl.heads())
1798
1804
1799 # pull off the changeset group
1805 # pull off the changeset group
1800 self.ui.status(_("adding changesets\n"))
1806 self.ui.status(_("adding changesets\n"))
1801 cor = cl.count() - 1
1807 cor = cl.count() - 1
1802 chunkiter = changegroup.chunkiter(source)
1808 chunkiter = changegroup.chunkiter(source)
1803 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1809 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1804 raise util.Abort(_("received changelog group is empty"))
1810 raise util.Abort(_("received changelog group is empty"))
1805 cnr = cl.count() - 1
1811 cnr = cl.count() - 1
1806 changesets = cnr - cor
1812 changesets = cnr - cor
1807
1813
1808 # pull off the manifest group
1814 # pull off the manifest group
1809 self.ui.status(_("adding manifests\n"))
1815 self.ui.status(_("adding manifests\n"))
1810 chunkiter = changegroup.chunkiter(source)
1816 chunkiter = changegroup.chunkiter(source)
1811 # no need to check for empty manifest group here:
1817 # no need to check for empty manifest group here:
1812 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1818 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1813 # no new manifest will be created and the manifest group will
1819 # no new manifest will be created and the manifest group will
1814 # be empty during the pull
1820 # be empty during the pull
1815 self.manifest.addgroup(chunkiter, revmap, tr)
1821 self.manifest.addgroup(chunkiter, revmap, tr)
1816
1822
1817 # process the files
1823 # process the files
1818 self.ui.status(_("adding file changes\n"))
1824 self.ui.status(_("adding file changes\n"))
1819 while 1:
1825 while 1:
1820 f = changegroup.getchunk(source)
1826 f = changegroup.getchunk(source)
1821 if not f:
1827 if not f:
1822 break
1828 break
1823 self.ui.debug(_("adding %s revisions\n") % f)
1829 self.ui.debug(_("adding %s revisions\n") % f)
1824 fl = self.file(f)
1830 fl = self.file(f)
1825 o = fl.count()
1831 o = fl.count()
1826 chunkiter = changegroup.chunkiter(source)
1832 chunkiter = changegroup.chunkiter(source)
1827 if fl.addgroup(chunkiter, revmap, tr) is None:
1833 if fl.addgroup(chunkiter, revmap, tr) is None:
1828 raise util.Abort(_("received file revlog group is empty"))
1834 raise util.Abort(_("received file revlog group is empty"))
1829 revisions += fl.count() - o
1835 revisions += fl.count() - o
1830 files += 1
1836 files += 1
1831
1837
1832 # make changelog see real files again
1838 # make changelog see real files again
1833 cl.finalize(tr)
1839 cl.finalize(tr)
1834
1840
1835 newheads = len(self.changelog.heads())
1841 newheads = len(self.changelog.heads())
1836 heads = ""
1842 heads = ""
1837 if oldheads and newheads != oldheads:
1843 if oldheads and newheads != oldheads:
1838 heads = _(" (%+d heads)") % (newheads - oldheads)
1844 heads = _(" (%+d heads)") % (newheads - oldheads)
1839
1845
1840 self.ui.status(_("added %d changesets"
1846 self.ui.status(_("added %d changesets"
1841 " with %d changes to %d files%s\n")
1847 " with %d changes to %d files%s\n")
1842 % (changesets, revisions, files, heads))
1848 % (changesets, revisions, files, heads))
1843
1849
1844 if changesets > 0:
1850 if changesets > 0:
1845 self.hook('pretxnchangegroup', throw=True,
1851 self.hook('pretxnchangegroup', throw=True,
1846 node=hex(self.changelog.node(cor+1)), source=srctype,
1852 node=hex(self.changelog.node(cor+1)), source=srctype,
1847 url=url)
1853 url=url)
1848
1854
1849 tr.close()
1855 tr.close()
1850
1856
1851 if changesets > 0:
1857 if changesets > 0:
1852 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1858 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1853 source=srctype, url=url)
1859 source=srctype, url=url)
1854
1860
1855 for i in xrange(cor + 1, cnr + 1):
1861 for i in xrange(cor + 1, cnr + 1):
1856 self.hook("incoming", node=hex(self.changelog.node(i)),
1862 self.hook("incoming", node=hex(self.changelog.node(i)),
1857 source=srctype, url=url)
1863 source=srctype, url=url)
1858
1864
1859 # never return 0 here:
1865 # never return 0 here:
1860 if newheads < oldheads:
1866 if newheads < oldheads:
1861 return newheads - oldheads - 1
1867 return newheads - oldheads - 1
1862 else:
1868 else:
1863 return newheads - oldheads + 1
1869 return newheads - oldheads + 1
1864
1870
1865
1871
1866 def stream_in(self, remote):
1872 def stream_in(self, remote):
1867 fp = remote.stream_out()
1873 fp = remote.stream_out()
1868 l = fp.readline()
1874 l = fp.readline()
1869 try:
1875 try:
1870 resp = int(l)
1876 resp = int(l)
1871 except ValueError:
1877 except ValueError:
1872 raise util.UnexpectedOutput(
1878 raise util.UnexpectedOutput(
1873 _('Unexpected response from remote server:'), l)
1879 _('Unexpected response from remote server:'), l)
1874 if resp == 1:
1880 if resp == 1:
1875 raise util.Abort(_('operation forbidden by server'))
1881 raise util.Abort(_('operation forbidden by server'))
1876 elif resp == 2:
1882 elif resp == 2:
1877 raise util.Abort(_('locking the remote repository failed'))
1883 raise util.Abort(_('locking the remote repository failed'))
1878 elif resp != 0:
1884 elif resp != 0:
1879 raise util.Abort(_('the server sent an unknown error code'))
1885 raise util.Abort(_('the server sent an unknown error code'))
1880 self.ui.status(_('streaming all changes\n'))
1886 self.ui.status(_('streaming all changes\n'))
1881 l = fp.readline()
1887 l = fp.readline()
1882 try:
1888 try:
1883 total_files, total_bytes = map(int, l.split(' ', 1))
1889 total_files, total_bytes = map(int, l.split(' ', 1))
1884 except ValueError, TypeError:
1890 except ValueError, TypeError:
1885 raise util.UnexpectedOutput(
1891 raise util.UnexpectedOutput(
1886 _('Unexpected response from remote server:'), l)
1892 _('Unexpected response from remote server:'), l)
1887 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 self.ui.status(_('%d files to transfer, %s of data\n') %
1888 (total_files, util.bytecount(total_bytes)))
1894 (total_files, util.bytecount(total_bytes)))
1889 start = time.time()
1895 start = time.time()
1890 for i in xrange(total_files):
1896 for i in xrange(total_files):
1891 # XXX doesn't support '\n' or '\r' in filenames
1897 # XXX doesn't support '\n' or '\r' in filenames
1892 l = fp.readline()
1898 l = fp.readline()
1893 try:
1899 try:
1894 name, size = l.split('\0', 1)
1900 name, size = l.split('\0', 1)
1895 size = int(size)
1901 size = int(size)
1896 except ValueError, TypeError:
1902 except ValueError, TypeError:
1897 raise util.UnexpectedOutput(
1903 raise util.UnexpectedOutput(
1898 _('Unexpected response from remote server:'), l)
1904 _('Unexpected response from remote server:'), l)
1899 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1900 ofp = self.sopener(name, 'w')
1906 ofp = self.sopener(name, 'w')
1901 for chunk in util.filechunkiter(fp, limit=size):
1907 for chunk in util.filechunkiter(fp, limit=size):
1902 ofp.write(chunk)
1908 ofp.write(chunk)
1903 ofp.close()
1909 ofp.close()
1904 elapsed = time.time() - start
1910 elapsed = time.time() - start
1905 if elapsed <= 0:
1911 if elapsed <= 0:
1906 elapsed = 0.001
1912 elapsed = 0.001
1907 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1913 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1908 (util.bytecount(total_bytes), elapsed,
1914 (util.bytecount(total_bytes), elapsed,
1909 util.bytecount(total_bytes / elapsed)))
1915 util.bytecount(total_bytes / elapsed)))
1910 self.reload()
1916 self.reload()
1911 return len(self.heads()) + 1
1917 return len(self.heads()) + 1
1912
1918
1913 def clone(self, remote, heads=[], stream=False):
1919 def clone(self, remote, heads=[], stream=False):
1914 '''clone remote repository.
1920 '''clone remote repository.
1915
1921
1916 keyword arguments:
1922 keyword arguments:
1917 heads: list of revs to clone (forces use of pull)
1923 heads: list of revs to clone (forces use of pull)
1918 stream: use streaming clone if possible'''
1924 stream: use streaming clone if possible'''
1919
1925
1920 # now, all clients that can request uncompressed clones can
1926 # now, all clients that can request uncompressed clones can
1921 # read repo formats supported by all servers that can serve
1927 # read repo formats supported by all servers that can serve
1922 # them.
1928 # them.
1923
1929
1924 # if revlog format changes, client will have to check version
1930 # if revlog format changes, client will have to check version
1925 # and format flags on "stream" capability, and use
1931 # and format flags on "stream" capability, and use
1926 # uncompressed only if compatible.
1932 # uncompressed only if compatible.
1927
1933
1928 if stream and not heads and remote.capable('stream'):
1934 if stream and not heads and remote.capable('stream'):
1929 return self.stream_in(remote)
1935 return self.stream_in(remote)
1930 return self.pull(remote, heads)
1936 return self.pull(remote, heads)
1931
1937
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are copied into fresh tuples immediately so the closure
    holds no reference back to the caller's sequence (which keeps
    destructors working — see the note above).
    """
    queued = [tuple(entry) for entry in files]
    def run_renames():
        for source, target in queued:
            util.rename(source, target)
    return run_renames
1939
1945
def instance(ui, path, create):
    """Open (or create) the local repository at path, stripping any
    leading file:// scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1942
1948
def islocal(path):
    """A localrepository path is always local, whatever it looks like."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now