##// END OF EJS Templates
localrepo: demand-load changeset, manifest, and dirstate
Matt Mackall -
r4559:eda59019 default
parent child Browse files
Show More
@@ -1,1958 +1,1967 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.path = path
23 self.path = path
24 self.root = os.path.realpath(path)
24 self.root = os.path.realpath(path)
25 self.path = os.path.join(self.root, ".hg")
25 self.path = os.path.join(self.root, ".hg")
26 self.origroot = path
26 self.origroot = path
27 self.opener = util.opener(self.path)
27 self.opener = util.opener(self.path)
28 self.wopener = util.opener(self.root)
28 self.wopener = util.opener(self.root)
29
29
30 if not os.path.isdir(self.path):
30 if not os.path.isdir(self.path):
31 if create:
31 if create:
32 if not os.path.exists(path):
32 if not os.path.exists(path):
33 os.mkdir(path)
33 os.mkdir(path)
34 os.mkdir(self.path)
34 os.mkdir(self.path)
35 requirements = ["revlogv1"]
35 requirements = ["revlogv1"]
36 if parentui.configbool('format', 'usestore', True):
36 if parentui.configbool('format', 'usestore', True):
37 os.mkdir(os.path.join(self.path, "store"))
37 os.mkdir(os.path.join(self.path, "store"))
38 requirements.append("store")
38 requirements.append("store")
39 # create an invalid changelog
39 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
40 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
41 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
42 ' dummy changelog to prevent using the old repo layout'
43 )
43 )
44 reqfile = self.opener("requires", "w")
44 reqfile = self.opener("requires", "w")
45 for r in requirements:
45 for r in requirements:
46 reqfile.write("%s\n" % r)
46 reqfile.write("%s\n" % r)
47 reqfile.close()
47 reqfile.close()
48 else:
48 else:
49 raise repo.RepoError(_("repository %s not found") % path)
49 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
50 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
51 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
52 else:
53 # find requirements
53 # find requirements
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 requirements = []
59 requirements = []
60 # check them
60 # check them
61 for r in requirements:
61 for r in requirements:
62 if r not in self.supported:
62 if r not in self.supported:
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64
64
65 # setup store
65 # setup store
66 if "store" in requirements:
66 if "store" in requirements:
67 self.encodefn = util.encodefilename
67 self.encodefn = util.encodefilename
68 self.decodefn = util.decodefilename
68 self.decodefn = util.decodefilename
69 self.spath = os.path.join(self.path, "store")
69 self.spath = os.path.join(self.path, "store")
70 else:
70 else:
71 self.encodefn = lambda x: x
71 self.encodefn = lambda x: x
72 self.decodefn = lambda x: x
72 self.decodefn = lambda x: x
73 self.spath = self.path
73 self.spath = self.path
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75
75
76 self.ui = ui.ui(parentui=parentui)
76 self.ui = ui.ui(parentui=parentui)
77 try:
77 try:
78 self.ui.readconfig(self.join("hgrc"), self.root)
78 self.ui.readconfig(self.join("hgrc"), self.root)
79 except IOError:
79 except IOError:
80 pass
80 pass
81
81
82 self.changelog = changelog.changelog(self.sopener)
83 self.sopener.defversion = self.changelog.version
84 self.manifest = manifest.manifest(self.sopener)
85
86 fallback = self.ui.config('ui', 'fallbackencoding')
82 fallback = self.ui.config('ui', 'fallbackencoding')
87 if fallback:
83 if fallback:
88 util._fallbackencoding = fallback
84 util._fallbackencoding = fallback
89
85
90 self.tagscache = None
86 self.tagscache = None
91 self.branchcache = None
87 self.branchcache = None
92 self.nodetagscache = None
88 self.nodetagscache = None
93 self.filterpats = {}
89 self.filterpats = {}
94 self.transhandle = None
90 self.transhandle = None
95
91
92 def __getattr__(self, name):
93 if name == 'changelog':
94 self.changelog = changelog.changelog(self.sopener)
95 self.sopener.defversion = self.changelog.version
96 return self.changelog
97 if name == 'manifest':
98 self.changelog
99 self.manifest = manifest.manifest(self.sopener)
100 return self.manifest
101 if name == 'dirstate':
96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 return self.dirstate
104 else:
105 raise AttributeError, name
97
106
98 def url(self):
107 def url(self):
99 return 'file:' + self.root
108 return 'file:' + self.root
100
109
101 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
102 def callhook(hname, funcname):
111 def callhook(hname, funcname):
103 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
104 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
105 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
106 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
107
116
108 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
109 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
110 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
111
120
112 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
113 obj = funcname
122 obj = funcname
114 if not callable(obj):
123 if not callable(obj):
115 d = funcname.rfind('.')
124 d = funcname.rfind('.')
116 if d == -1:
125 if d == -1:
117 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
118 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
119 modname = funcname[:d]
128 modname = funcname[:d]
120 try:
129 try:
121 obj = __import__(modname)
130 obj = __import__(modname)
122 except ImportError:
131 except ImportError:
123 try:
132 try:
124 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
125 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
126 except ImportError:
135 except ImportError:
127 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
128 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
129 (hname, modname))
138 (hname, modname))
130 try:
139 try:
131 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
132 obj = getattr(obj, p)
141 obj = getattr(obj, p)
133 except AttributeError, err:
142 except AttributeError, err:
134 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
135 '("%s" is not defined)') %
144 '("%s" is not defined)') %
136 (hname, funcname))
145 (hname, funcname))
137 if not callable(obj):
146 if not callable(obj):
138 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
139 '("%s" is not callable)') %
148 '("%s" is not callable)') %
140 (hname, funcname))
149 (hname, funcname))
141 try:
150 try:
142 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
143 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
144 raise
153 raise
145 except Exception, exc:
154 except Exception, exc:
146 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
147 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
148 (hname, exc.args[0]))
157 (hname, exc.args[0]))
149 else:
158 else:
150 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
151 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
152 if throw:
161 if throw:
153 raise
162 raise
154 self.ui.print_exc()
163 self.ui.print_exc()
155 return True
164 return True
156 if r:
165 if r:
157 if throw:
166 if throw:
158 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
159 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
160 return r
169 return r
161
170
162 def runhook(name, cmd):
171 def runhook(name, cmd):
163 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
164 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
165 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
166 if r:
175 if r:
167 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
168 if throw:
177 if throw:
169 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
170 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
171 return r
180 return r
172
181
173 r = False
182 r = False
174 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
175 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
176 hooks.sort()
185 hooks.sort()
177 for hname, cmd in hooks:
186 for hname, cmd in hooks:
178 if callable(cmd):
187 if callable(cmd):
179 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
180 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
181 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
182 else:
191 else:
183 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
184 return r
193 return r
185
194
186 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
187
196
188 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
189 use_dirstate = parent is None
198 use_dirstate = parent is None
190
199
191 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
192 if c in name:
201 if c in name:
193 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
194
203
195 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
196
205
197 if local:
206 if local:
198 # local tags are stored in the current charset
207 # local tags are stored in the current charset
199 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
200 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
201 return
210 return
202
211
203 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
204 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
205 if use_dirstate:
214 if use_dirstate:
206 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
207 else:
216 else:
208 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
209 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
210 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
211 self.add(['.hgtags'])
220 self.add(['.hgtags'])
212
221
213 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
214
223
215 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
216
225
217 return tagnode
226 return tagnode
218
227
219 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
220 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
221
230
222 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
223 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
224 changeset is committed with the change.
233 changeset is committed with the change.
225
234
226 keyword arguments:
235 keyword arguments:
227
236
228 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
229 (default False)
238 (default False)
230
239
231 message: commit message to use if committing
240 message: commit message to use if committing
232
241
233 user: name of user to use if committing
242 user: name of user to use if committing
234
243
235 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
236
245
237 for x in self.status()[:5]:
246 for x in self.status()[:5]:
238 if '.hgtags' in x:
247 if '.hgtags' in x:
239 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
240 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
241
250
242
251
243 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
244
253
245 def tags(self):
254 def tags(self):
246 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
247 if self.tagscache:
256 if self.tagscache:
248 return self.tagscache
257 return self.tagscache
249
258
250 globaltags = {}
259 globaltags = {}
251
260
252 def readtags(lines, fn):
261 def readtags(lines, fn):
253 filetags = {}
262 filetags = {}
254 count = 0
263 count = 0
255
264
256 def warn(msg):
265 def warn(msg):
257 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258
267
259 for l in lines:
268 for l in lines:
260 count += 1
269 count += 1
261 if not l:
270 if not l:
262 continue
271 continue
263 s = l.split(" ", 1)
272 s = l.split(" ", 1)
264 if len(s) != 2:
273 if len(s) != 2:
265 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
266 continue
275 continue
267 node, key = s
276 node, key = s
268 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
269 try:
278 try:
270 bin_n = bin(node)
279 bin_n = bin(node)
271 except TypeError:
280 except TypeError:
272 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
273 continue
282 continue
274 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
275 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
276 continue
285 continue
277
286
278 h = []
287 h = []
279 if key in filetags:
288 if key in filetags:
280 n, h = filetags[key]
289 n, h = filetags[key]
281 h.append(n)
290 h.append(n)
282 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
283
292
284 for k,nh in filetags.items():
293 for k,nh in filetags.items():
285 if k not in globaltags:
294 if k not in globaltags:
286 globaltags[k] = nh
295 globaltags[k] = nh
287 continue
296 continue
288 # we prefer the global tag if:
297 # we prefer the global tag if:
289 # it supercedes us OR
298 # it supercedes us OR
290 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
291 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
292 an, ah = nh
301 an, ah = nh
293 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
294 if bn != an and an in bh and \
303 if bn != an and an in bh and \
295 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
296 an = bn
305 an = bn
297 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
298 globaltags[k] = an, ah
307 globaltags[k] = an, ah
299
308
300 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
301 f = None
310 f = None
302 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
303 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
304 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
305 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
306
315
307 try:
316 try:
308 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
309 # localtags are stored in the local character set
318 # localtags are stored in the local character set
310 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
311 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
312 except IOError:
321 except IOError:
313 pass
322 pass
314
323
315 self.tagscache = {}
324 self.tagscache = {}
316 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
317 n = nh[0]
326 n = nh[0]
318 if n != nullid:
327 if n != nullid:
319 self.tagscache[k] = n
328 self.tagscache[k] = n
320 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
321
330
322 return self.tagscache
331 return self.tagscache
323
332
324 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
325 heads = self.heads()
334 heads = self.heads()
326 heads.reverse()
335 heads.reverse()
327 last = {}
336 last = {}
328 ret = []
337 ret = []
329 for node in heads:
338 for node in heads:
330 c = self.changectx(node)
339 c = self.changectx(node)
331 rev = c.rev()
340 rev = c.rev()
332 try:
341 try:
333 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
334 except revlog.LookupError:
343 except revlog.LookupError:
335 continue
344 continue
336 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
337 if fnode in last:
346 if fnode in last:
338 ret[last[fnode]] = None
347 ret[last[fnode]] = None
339 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
340 return [item for item in ret if item]
349 return [item for item in ret if item]
341
350
342 def tagslist(self):
351 def tagslist(self):
343 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
344 l = []
353 l = []
345 for t, n in self.tags().items():
354 for t, n in self.tags().items():
346 try:
355 try:
347 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
348 except:
357 except:
349 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
350 l.append((r, t, n))
359 l.append((r, t, n))
351 l.sort()
360 l.sort()
352 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
353
362
354 def nodetags(self, node):
363 def nodetags(self, node):
355 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
356 if not self.nodetagscache:
365 if not self.nodetagscache:
357 self.nodetagscache = {}
366 self.nodetagscache = {}
358 for t, n in self.tags().items():
367 for t, n in self.tags().items():
359 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
361
370
362 def _branchtags(self):
371 def _branchtags(self):
363 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
364
373
365 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
366 if lrev != tiprev:
375 if lrev != tiprev:
367 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
368 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
369
378
370 return partial
379 return partial
371
380
372 def branchtags(self):
381 def branchtags(self):
373 if self.branchcache is not None:
382 if self.branchcache is not None:
374 return self.branchcache
383 return self.branchcache
375
384
376 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
377 partial = self._branchtags()
386 partial = self._branchtags()
378
387
379 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
380 # charset internally
389 # charset internally
381 for k, v in partial.items():
390 for k, v in partial.items():
382 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
383 return self.branchcache
392 return self.branchcache
384
393
385 def _readbranchcache(self):
394 def _readbranchcache(self):
386 partial = {}
395 partial = {}
387 try:
396 try:
388 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
389 lines = f.read().split('\n')
398 lines = f.read().split('\n')
390 f.close()
399 f.close()
391 except (IOError, OSError):
400 except (IOError, OSError):
392 return {}, nullid, nullrev
401 return {}, nullid, nullrev
393
402
394 try:
403 try:
395 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
396 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
397 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
398 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
399 # invalidate the cache
408 # invalidate the cache
400 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
401 for l in lines:
410 for l in lines:
402 if not l: continue
411 if not l: continue
403 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
404 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
405 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
406 raise
415 raise
407 except Exception, inst:
416 except Exception, inst:
408 if self.ui.debugflag:
417 if self.ui.debugflag:
409 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
410 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
411 return partial, last, lrev
420 return partial, last, lrev
412
421
413 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
414 try:
423 try:
415 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
416 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
417 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
418 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
419 f.rename()
428 f.rename()
420 except (IOError, OSError):
429 except (IOError, OSError):
421 pass
430 pass
422
431
423 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
424 for r in xrange(start, end):
433 for r in xrange(start, end):
425 c = self.changectx(r)
434 c = self.changectx(r)
426 b = c.branch()
435 b = c.branch()
427 partial[b] = c.node()
436 partial[b] = c.node()
428
437
429 def lookup(self, key):
438 def lookup(self, key):
430 if key == '.':
439 if key == '.':
431 key, second = self.dirstate.parents()
440 key, second = self.dirstate.parents()
432 if key == nullid:
441 if key == nullid:
433 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
434 if second != nullid:
443 if second != nullid:
435 self.ui.warn(_("warning: working directory has two parents, "
444 self.ui.warn(_("warning: working directory has two parents, "
436 "tag '.' uses the first\n"))
445 "tag '.' uses the first\n"))
437 elif key == 'null':
446 elif key == 'null':
438 return nullid
447 return nullid
439 n = self.changelog._match(key)
448 n = self.changelog._match(key)
440 if n:
449 if n:
441 return n
450 return n
442 if key in self.tags():
451 if key in self.tags():
443 return self.tags()[key]
452 return self.tags()[key]
444 if key in self.branchtags():
453 if key in self.branchtags():
445 return self.branchtags()[key]
454 return self.branchtags()[key]
446 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
447 if n:
456 if n:
448 return n
457 return n
449 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
450
459
451 def dev(self):
460 def dev(self):
452 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
453
462
454 def local(self):
463 def local(self):
455 return True
464 return True
456
465
457 def join(self, f):
466 def join(self, f):
458 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
459
468
460 def sjoin(self, f):
469 def sjoin(self, f):
461 f = self.encodefn(f)
470 f = self.encodefn(f)
462 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
463
472
464 def wjoin(self, f):
473 def wjoin(self, f):
465 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
466
475
467 def file(self, f):
476 def file(self, f):
468 if f[0] == '/':
477 if f[0] == '/':
469 f = f[1:]
478 f = f[1:]
470 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
471
480
472 def changectx(self, changeid=None):
481 def changectx(self, changeid=None):
473 return context.changectx(self, changeid)
482 return context.changectx(self, changeid)
474
483
475 def workingctx(self):
484 def workingctx(self):
476 return context.workingctx(self)
485 return context.workingctx(self)
477
486
478 def parents(self, changeid=None):
487 def parents(self, changeid=None):
479 '''
488 '''
480 get list of changectxs for parents of changeid or working directory
489 get list of changectxs for parents of changeid or working directory
481 '''
490 '''
482 if changeid is None:
491 if changeid is None:
483 pl = self.dirstate.parents()
492 pl = self.dirstate.parents()
484 else:
493 else:
485 n = self.changelog.lookup(changeid)
494 n = self.changelog.lookup(changeid)
486 pl = self.changelog.parents(n)
495 pl = self.changelog.parents(n)
487 if pl[1] == nullid:
496 if pl[1] == nullid:
488 return [self.changectx(pl[0])]
497 return [self.changectx(pl[0])]
489 return [self.changectx(pl[0]), self.changectx(pl[1])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
490
499
491 def filectx(self, path, changeid=None, fileid=None):
500 def filectx(self, path, changeid=None, fileid=None):
492 """changeid can be a changeset revision, node, or tag.
501 """changeid can be a changeset revision, node, or tag.
493 fileid can be a file revision or node."""
502 fileid can be a file revision or node."""
494 return context.filectx(self, path, changeid, fileid)
503 return context.filectx(self, path, changeid, fileid)
495
504
496 def getcwd(self):
505 def getcwd(self):
497 return self.dirstate.getcwd()
506 return self.dirstate.getcwd()
498
507
499 def pathto(self, f, cwd=None):
508 def pathto(self, f, cwd=None):
500 return self.dirstate.pathto(f, cwd)
509 return self.dirstate.pathto(f, cwd)
501
510
502 def wfile(self, f, mode='r'):
511 def wfile(self, f, mode='r'):
503 return self.wopener(f, mode)
512 return self.wopener(f, mode)
504
513
505 def _link(self, f):
514 def _link(self, f):
506 return os.path.islink(self.wjoin(f))
515 return os.path.islink(self.wjoin(f))
507
516
508 def _filter(self, filter, filename, data):
517 def _filter(self, filter, filename, data):
509 if filter not in self.filterpats:
518 if filter not in self.filterpats:
510 l = []
519 l = []
511 for pat, cmd in self.ui.configitems(filter):
520 for pat, cmd in self.ui.configitems(filter):
512 mf = util.matcher(self.root, "", [pat], [], [])[1]
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
513 l.append((mf, cmd))
522 l.append((mf, cmd))
514 self.filterpats[filter] = l
523 self.filterpats[filter] = l
515
524
516 for mf, cmd in self.filterpats[filter]:
525 for mf, cmd in self.filterpats[filter]:
517 if mf(filename):
526 if mf(filename):
518 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
519 data = util.filter(data, cmd)
528 data = util.filter(data, cmd)
520 break
529 break
521
530
522 return data
531 return data
523
532
524 def wread(self, filename):
533 def wread(self, filename):
525 if self._link(filename):
534 if self._link(filename):
526 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
527 else:
536 else:
528 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
529 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
530
539
    def wwrite(self, filename, data, flags):
        """Write *data* to *filename* in the working directory.

        *data* first passes through the "decode" filters.  If flags
        contain "l", the file is (re)created as a symlink pointing at
        *data*; otherwise a regular file is written and its executable
        bit set from the "x" flag.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            # remove any existing file/symlink first; ignore a missing one
            try:
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            # if the path currently is a symlink, drop it so we write a
            # regular file instead of following the link target
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
551
560
552 def wwritedata(self, filename, data):
561 def wwritedata(self, filename, data):
553 return self._filter("decode", filename, data)
562 return self._filter("decode", filename, data)
554
563
555 def transaction(self):
564 def transaction(self):
556 tr = self.transhandle
565 tr = self.transhandle
557 if tr != None and tr.running():
566 if tr != None and tr.running():
558 return tr.nest()
567 return tr.nest()
559
568
560 # save dirstate for rollback
569 # save dirstate for rollback
561 try:
570 try:
562 ds = self.opener("dirstate").read()
571 ds = self.opener("dirstate").read()
563 except IOError:
572 except IOError:
564 ds = ""
573 ds = ""
565 self.opener("journal.dirstate", "w").write(ds)
574 self.opener("journal.dirstate", "w").write(ds)
566
575
567 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
568 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
569 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
570 self.sjoin("journal"),
579 self.sjoin("journal"),
571 aftertrans(renames))
580 aftertrans(renames))
572 self.transhandle = tr
581 self.transhandle = tr
573 return tr
582 return tr
574
583
    def recover(self):
        """Undo an interrupted transaction, if one was left behind.

        Takes the store lock, rolls back the on-disk journal when one
        exists, reloads in-memory state, and returns True; returns
        False (with a warning) when there is nothing to recover.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # drop caches so they are re-read from the restored store
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
585
594
    def rollback(self, wlock=None, lock=None):
        """Roll back the last committed transaction.

        Acquires the working-dir and store locks unless the caller
        already holds them, replays the undo journal, restores the
        saved dirstate, and reloads in-memory state.  Warns and does
        nothing when no undo information exists.
        """
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store-level and working-dir-level caches
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
599
608
    def wreload(self):
        """Reload working-directory state (the dirstate) from disk."""
        self.dirstate.reload()
602
611
603 def reload(self):
612 def reload(self):
604 self.changelog.load()
613 self.changelog.load()
605 self.manifest.load()
614 self.manifest.load()
606 self.tagscache = None
615 self.tagscache = None
607 self.nodetagscache = None
616 self.nodetagscache = None
608
617
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file *lockname*.

        First tries a non-blocking acquire (timeout 0).  If the lock is
        held and *wait* is true, warns who holds it and retries with the
        configured ui.timeout (default 600s); otherwise re-raises
        LockHeld.  *releasefn* runs on release, *acquirefn* immediately
        after acquisition, *desc* labels the lock in messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
624
633
625 def lock(self, wait=1):
634 def lock(self, wait=1):
626 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
627 desc=_('repository %s') % self.origroot)
636 desc=_('repository %s') % self.origroot)
628
637
629 def wlock(self, wait=1):
638 def wlock(self, wait=1):
630 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
631 self.wreload,
640 self.wreload,
632 desc=_('working directory of %s') % self.origroot)
641 desc=_('working directory of %s') % self.origroot)
633
642
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for fn's new (or unchanged) revision.
        Appends fn to changelist only when a new filelog revision is
        actually created.  manifest1/manifest2 are the parents'
        manifests; dirstate copy info is folded into filelog metadata.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # first parent becomes null: "look up the copy data"
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
693
702
694 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
695 if p1 is None:
704 if p1 is None:
696 p1, p2 = self.dirstate.parents()
705 p1, p2 = self.dirstate.parents()
697 return self.commit(files=files, text=text, user=user, date=date,
706 return self.commit(files=files, text=text, user=user, date=date,
698 p1=p1, p2=p2, wlock=wlock, extra=extra)
707 p1=p1, p2=p2, wlock=wlock, extra=extra)
699
708
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node (or None).

        With p1 unset ("dirstate mode") the commit is built from the
        dirstate: either the given *files* (filtered by dirstate state)
        or everything self.status() reports as modified/added/removed.
        With p1 given (rawcommit path) *files* are committed as-is
        against the explicit parents.  Returns None when nothing
        changed or the commit message ends up empty.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # copy so callers' dicts (and the shared default) are not mutated
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is legal UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                        m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                # rawcommit tolerates unreadable files by removing them;
                # a dirstate commit treats that as a hard error
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty result aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
859
868
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a changeset: match against its manifest
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe to delete while iterating: we break right away
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # no node: defer to the dirstate's working-dir walk
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
901
910
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored/clean are only populated when the corresponding
        list_* flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-dir contents of fn against its filelog entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # opportunistically mark the file clean in the
                            # dirstate, taking the wlock non-blocking; if
                            # the lock is unavailable just skip the update
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                # NOTE(review): dict.has_key is the historical spelling;
                # `fn in mf1` is the modern equivalent
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1010
1019
1011 def add(self, list, wlock=None):
1020 def add(self, list, wlock=None):
1012 if not wlock:
1021 if not wlock:
1013 wlock = self.wlock()
1022 wlock = self.wlock()
1014 for f in list:
1023 for f in list:
1015 p = self.wjoin(f)
1024 p = self.wjoin(f)
1016 islink = os.path.islink(p)
1025 islink = os.path.islink(p)
1017 size = os.lstat(p).st_size
1026 size = os.lstat(p).st_size
1018 if size > 10000000:
1027 if size > 10000000:
1019 self.ui.warn(_("%s: files over 10MB may cause memory and"
1028 self.ui.warn(_("%s: files over 10MB may cause memory and"
1020 " performance problems\n"
1029 " performance problems\n"
1021 "(use 'hg revert %s' to unadd the file)\n")
1030 "(use 'hg revert %s' to unadd the file)\n")
1022 % (f, f))
1031 % (f, f))
1023 if not islink and not os.path.exists(p):
1032 if not islink and not os.path.exists(p):
1024 self.ui.warn(_("%s does not exist!\n") % f)
1033 self.ui.warn(_("%s does not exist!\n") % f)
1025 elif not islink and not os.path.isfile(p):
1034 elif not islink and not os.path.isfile(p):
1026 self.ui.warn(_("%s not added: only files and symlinks "
1035 self.ui.warn(_("%s not added: only files and symlinks "
1027 "supported currently\n") % f)
1036 "supported currently\n") % f)
1028 elif self.dirstate.state(f) in 'an':
1037 elif self.dirstate.state(f) in 'an':
1029 self.ui.warn(_("%s already tracked!\n") % f)
1038 self.ui.warn(_("%s already tracked!\n") % f)
1030 else:
1039 else:
1031 self.dirstate.update([f], "a")
1040 self.dirstate.update([f], "a")
1032
1041
1033 def forget(self, list, wlock=None):
1042 def forget(self, list, wlock=None):
1034 if not wlock:
1043 if not wlock:
1035 wlock = self.wlock()
1044 wlock = self.wlock()
1036 for f in list:
1045 for f in list:
1037 if self.dirstate.state(f) not in 'ai':
1046 if self.dirstate.state(f) not in 'ai':
1038 self.ui.warn(_("%s not added!\n") % f)
1047 self.ui.warn(_("%s not added!\n") % f)
1039 else:
1048 else:
1040 self.dirstate.forget([f])
1049 self.dirstate.forget([f])
1041
1050
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit.

        With unlink=True the files are also deleted from the working
        directory first (a missing file is fine; other OS errors
        propagate).  Files that still exist after an unlink attempt, or
        that are untracked, only produce a warning; a pending add is
        simply forgotten instead of recorded as removed.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # ignore already-missing files, re-raise anything else
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1061
1070
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal ('r' state).

        Re-reads each file's contents from the first dirstate parent's
        manifest, writes it back to the working directory with its
        recorded flags, and marks it normal again.  Warns about files
        not in the removed state.
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")
1075
1084
1076 def copy(self, source, dest, wlock=None):
1085 def copy(self, source, dest, wlock=None):
1077 p = self.wjoin(dest)
1086 p = self.wjoin(dest)
1078 if not (os.path.exists(p) or os.path.islink(p)):
1087 if not (os.path.exists(p) or os.path.islink(p)):
1079 self.ui.warn(_("%s does not exist!\n") % dest)
1088 self.ui.warn(_("%s does not exist!\n") % dest)
1080 elif not (os.path.isfile(p) or os.path.islink(p)):
1089 elif not (os.path.isfile(p) or os.path.islink(p)):
1081 self.ui.warn(_("copy failed: %s is not a file or a "
1090 self.ui.warn(_("copy failed: %s is not a file or a "
1082 "symbolic link\n") % dest)
1091 "symbolic link\n") % dest)
1083 else:
1092 else:
1084 if not wlock:
1093 if not wlock:
1085 wlock = self.wlock()
1094 wlock = self.wlock()
1086 if self.dirstate.state(dest) == '?':
1095 if self.dirstate.state(dest) == '?':
1087 self.dirstate.update([dest], "a")
1096 self.dirstate.update([dest], "a")
1088 self.dirstate.copy(source, dest)
1097 self.dirstate.copy(source, dest)
1089
1098
1090 def heads(self, start=None):
1099 def heads(self, start=None):
1091 heads = self.changelog.heads(start)
1100 heads = self.changelog.heads(start)
1092 # sort the output in rev descending order
1101 # sort the output in rev descending order
1093 heads = [(-self.changelog.rev(h), h) for h in heads]
1102 heads = [(-self.changelog.rev(h), h) for h in heads]
1094 heads.sort()
1103 heads.sort()
1095 return [n for (r, n) in heads]
1104 return [n for (r, n) in heads]
1096
1105
1097 def branches(self, nodes):
1106 def branches(self, nodes):
1098 if not nodes:
1107 if not nodes:
1099 nodes = [self.changelog.tip()]
1108 nodes = [self.changelog.tip()]
1100 b = []
1109 b = []
1101 for n in nodes:
1110 for n in nodes:
1102 t = n
1111 t = n
1103 while 1:
1112 while 1:
1104 p = self.changelog.parents(n)
1113 p = self.changelog.parents(n)
1105 if p[1] != nullid or p[0] == nullid:
1114 if p[1] != nullid or p[0] == nullid:
1106 b.append((t, n, p[0], p[1]))
1115 b.append((t, n, p[0], p[1]))
1107 break
1116 break
1108 n = p[0]
1117 n = p[0]
1109 return b
1118 return b
1110
1119
1111 def between(self, pairs):
1120 def between(self, pairs):
1112 r = []
1121 r = []
1113
1122
1114 for top, bottom in pairs:
1123 for top, bottom in pairs:
1115 n, l, i = top, [], 0
1124 n, l, i = top, [], 0
1116 f = 1
1125 f = 1
1117
1126
1118 while n != bottom:
1127 while n != bottom:
1119 p = self.changelog.parents(n)[0]
1128 p = self.changelog.parents(n)[0]
1120 if i == f:
1129 if i == f:
1121 l.append(n)
1130 l.append(n)
1122 f = f * 2
1131 f = f * 2
1123 n = p
1132 n = p
1124 i += 1
1133 i += 1
1125
1134
1126 r.append(l)
1135 r.append(l)
1127
1136
1128 return r
1137 return r
1129
1138
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []       # branch ranges scheduled for binary search below
        fetch = {}        # earliest unknown nodes found so far (the result)
        seen = {}         # branch heads already examined
        seenbranch = {}   # full branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return []

        req = dict.fromkeys(unknown)  # nodes already requested from remote
        reqcnt = 0                    # round-trip counter (debug output only)

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents of the branch root are known,
                            # so the root itself is an earliest-unknown node
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next query round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch the parent queries ten at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # the known node is at most two steps away: the
                        # previously sampled node is an earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow the range and search it again
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # the only common point is the null revision: the two
            # repositories share no history
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1270
1279
1271 def findoutgoing(self, remote, base=None, heads=None, force=False):
1280 def findoutgoing(self, remote, base=None, heads=None, force=False):
1272 """Return list of nodes that are roots of subsets not in remote
1281 """Return list of nodes that are roots of subsets not in remote
1273
1282
1274 If base dict is specified, assume that these nodes and their parents
1283 If base dict is specified, assume that these nodes and their parents
1275 exist on the remote side.
1284 exist on the remote side.
1276 If a list of heads is specified, return only nodes which are heads
1285 If a list of heads is specified, return only nodes which are heads
1277 or ancestors of these heads, and return a second element which
1286 or ancestors of these heads, and return a second element which
1278 contains all remote heads which get new children.
1287 contains all remote heads which get new children.
1279 """
1288 """
1280 if base == None:
1289 if base == None:
1281 base = {}
1290 base = {}
1282 self.findincoming(remote, base, heads, force=force)
1291 self.findincoming(remote, base, heads, force=force)
1283
1292
1284 self.ui.debug(_("common changesets up to ")
1293 self.ui.debug(_("common changesets up to ")
1285 + " ".join(map(short, base.keys())) + "\n")
1294 + " ".join(map(short, base.keys())) + "\n")
1286
1295
1287 remain = dict.fromkeys(self.changelog.nodemap)
1296 remain = dict.fromkeys(self.changelog.nodemap)
1288
1297
1289 # prune everything remote has from the tree
1298 # prune everything remote has from the tree
1290 del remain[nullid]
1299 del remain[nullid]
1291 remove = base.keys()
1300 remove = base.keys()
1292 while remove:
1301 while remove:
1293 n = remove.pop(0)
1302 n = remove.pop(0)
1294 if n in remain:
1303 if n in remain:
1295 del remain[n]
1304 del remain[n]
1296 for p in self.changelog.parents(n):
1305 for p in self.changelog.parents(n):
1297 remove.append(p)
1306 remove.append(p)
1298
1307
1299 # find every node whose parents have been pruned
1308 # find every node whose parents have been pruned
1300 subset = []
1309 subset = []
1301 # find every remote head that will get new children
1310 # find every remote head that will get new children
1302 updated_heads = {}
1311 updated_heads = {}
1303 for n in remain:
1312 for n in remain:
1304 p1, p2 = self.changelog.parents(n)
1313 p1, p2 = self.changelog.parents(n)
1305 if p1 not in remain and p2 not in remain:
1314 if p1 not in remain and p2 not in remain:
1306 subset.append(n)
1315 subset.append(n)
1307 if heads:
1316 if heads:
1308 if p1 in heads:
1317 if p1 in heads:
1309 updated_heads[p1] = True
1318 updated_heads[p1] = True
1310 if p2 in heads:
1319 if p2 in heads:
1311 updated_heads[p2] = True
1320 updated_heads[p2] = True
1312
1321
1313 # this is the set of all roots we have to push
1322 # this is the set of all roots we have to push
1314 if heads:
1323 if heads:
1315 return subset, updated_heads.keys()
1324 return subset, updated_heads.keys()
1316 else:
1325 else:
1317 return subset
1326 return subset
1318
1327
1319 def pull(self, remote, heads=None, force=False, lock=None):
1328 def pull(self, remote, heads=None, force=False, lock=None):
1320 mylock = False
1329 mylock = False
1321 if not lock:
1330 if not lock:
1322 lock = self.lock()
1331 lock = self.lock()
1323 mylock = True
1332 mylock = True
1324
1333
1325 try:
1334 try:
1326 fetch = self.findincoming(remote, force=force)
1335 fetch = self.findincoming(remote, force=force)
1327 if fetch == [nullid]:
1336 if fetch == [nullid]:
1328 self.ui.status(_("requesting all changes\n"))
1337 self.ui.status(_("requesting all changes\n"))
1329
1338
1330 if not fetch:
1339 if not fetch:
1331 self.ui.status(_("no changes found\n"))
1340 self.ui.status(_("no changes found\n"))
1332 return 0
1341 return 0
1333
1342
1334 if heads is None:
1343 if heads is None:
1335 cg = remote.changegroup(fetch, 'pull')
1344 cg = remote.changegroup(fetch, 'pull')
1336 else:
1345 else:
1337 if 'changegroupsubset' not in remote.capabilities:
1346 if 'changegroupsubset' not in remote.capabilities:
1338 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1347 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1339 cg = remote.changegroupsubset(fetch, heads, 'pull')
1348 cg = remote.changegroupsubset(fetch, heads, 'pull')
1340 return self.addchangegroup(cg, 'pull', remote.url())
1349 return self.addchangegroup(cg, 'pull', remote.url())
1341 finally:
1350 finally:
1342 if mylock:
1351 if mylock:
1343 lock.release()
1352 lock.release()
1344
1353
1345 def push(self, remote, force=False, revs=None):
1354 def push(self, remote, force=False, revs=None):
1346 # there are two ways to push to remote repo:
1355 # there are two ways to push to remote repo:
1347 #
1356 #
1348 # addchangegroup assumes local user can lock remote
1357 # addchangegroup assumes local user can lock remote
1349 # repo (local filesystem, old ssh servers).
1358 # repo (local filesystem, old ssh servers).
1350 #
1359 #
1351 # unbundle assumes local user cannot lock remote repo (new ssh
1360 # unbundle assumes local user cannot lock remote repo (new ssh
1352 # servers, http servers).
1361 # servers, http servers).
1353
1362
1354 if remote.capable('unbundle'):
1363 if remote.capable('unbundle'):
1355 return self.push_unbundle(remote, force, revs)
1364 return self.push_unbundle(remote, force, revs)
1356 return self.push_addchangegroup(remote, force, revs)
1365 return self.push_addchangegroup(remote, force, revs)
1357
1366
    def prepush(self, remote, force, revs):
        """Shared pre-push work: compute the changegroup to transfer.

        Returns a (changegroup, remote_heads) pair; changegroup is None
        when there is nothing to push or the push was refused, in which
        case the second element is a status code instead.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to the requested revisions
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo always creates heads; fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push with unpulled remote changes: warn but continue
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # push everything outgoing
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1413
1422
1414 def push_addchangegroup(self, remote, force, revs):
1423 def push_addchangegroup(self, remote, force, revs):
1415 lock = remote.lock()
1424 lock = remote.lock()
1416
1425
1417 ret = self.prepush(remote, force, revs)
1426 ret = self.prepush(remote, force, revs)
1418 if ret[0] is not None:
1427 if ret[0] is not None:
1419 cg, remote_heads = ret
1428 cg, remote_heads = ret
1420 return remote.addchangegroup(cg, 'push', self.url())
1429 return remote.addchangegroup(cg, 'push', self.url())
1421 return ret[1]
1430 return ret[1]
1422
1431
1423 def push_unbundle(self, remote, force, revs):
1432 def push_unbundle(self, remote, force, revs):
1424 # local repo finds heads on server, finds out what revs it
1433 # local repo finds heads on server, finds out what revs it
1425 # must push. once revs transferred, if server finds it has
1434 # must push. once revs transferred, if server finds it has
1426 # different heads (someone else won commit/push race), server
1435 # different heads (someone else won commit/push race), server
1427 # aborts.
1436 # aborts.
1428
1437
1429 ret = self.prepush(remote, force, revs)
1438 ret = self.prepush(remote, force, revs)
1430 if ret[0] is not None:
1439 if ret[0] is not None:
1431 cg, remote_heads = ret
1440 cg, remote_heads = ret
1432 if force: remote_heads = ['force']
1441 if force: remote_heads = ['force']
1433 return remote.unbundle(cg, remote_heads, 'push')
1442 return remote.unbundle(cg, remote_heads, 'push')
1434 return ret[1]
1443 return ret[1]
1435
1444
1436 def changegroupinfo(self, nodes):
1445 def changegroupinfo(self, nodes):
1437 self.ui.note(_("%d changesets found\n") % len(nodes))
1446 self.ui.note(_("%d changesets found\n") % len(nodes))
1438 if self.ui.debugflag:
1447 if self.ui.debugflag:
1439 self.ui.debug(_("List of changesets:\n"))
1448 self.ui.debug(_("List of changesets:\n"))
1440 for node in nodes:
1449 for node in nodes:
1441 self.ui.debug("%s\n" % hex(node))
1450 self.ui.debug("%s\n" % hex(node))
1442
1451
1443 def changegroupsubset(self, bases, heads, source):
1452 def changegroupsubset(self, bases, heads, source):
1444 """This function generates a changegroup consisting of all the nodes
1453 """This function generates a changegroup consisting of all the nodes
1445 that are descendents of any of the bases, and ancestors of any of
1454 that are descendents of any of the bases, and ancestors of any of
1446 the heads.
1455 the heads.
1447
1456
1448 It is fairly complex as determining which filenodes and which
1457 It is fairly complex as determining which filenodes and which
1449 manifest nodes need to be included for the changeset to be complete
1458 manifest nodes need to be included for the changeset to be complete
1450 is non-trivial.
1459 is non-trivial.
1451
1460
1452 Another wrinkle is doing the reverse, figuring out which changeset in
1461 Another wrinkle is doing the reverse, figuring out which changeset in
1453 the changegroup a particular filenode or manifestnode belongs to."""
1462 the changegroup a particular filenode or manifestnode belongs to."""
1454
1463
1455 self.hook('preoutgoing', throw=True, source=source)
1464 self.hook('preoutgoing', throw=True, source=source)
1456
1465
1457 # Set up some initial variables
1466 # Set up some initial variables
1458 # Make it easy to refer to self.changelog
1467 # Make it easy to refer to self.changelog
1459 cl = self.changelog
1468 cl = self.changelog
1460 # msng is short for missing - compute the list of changesets in this
1469 # msng is short for missing - compute the list of changesets in this
1461 # changegroup.
1470 # changegroup.
1462 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1471 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1463 self.changegroupinfo(msng_cl_lst)
1472 self.changegroupinfo(msng_cl_lst)
1464 # Some bases may turn out to be superfluous, and some heads may be
1473 # Some bases may turn out to be superfluous, and some heads may be
1465 # too. nodesbetween will return the minimal set of bases and heads
1474 # too. nodesbetween will return the minimal set of bases and heads
1466 # necessary to re-create the changegroup.
1475 # necessary to re-create the changegroup.
1467
1476
1468 # Known heads are the list of heads that it is assumed the recipient
1477 # Known heads are the list of heads that it is assumed the recipient
1469 # of this changegroup will know about.
1478 # of this changegroup will know about.
1470 knownheads = {}
1479 knownheads = {}
1471 # We assume that all parents of bases are known heads.
1480 # We assume that all parents of bases are known heads.
1472 for n in bases:
1481 for n in bases:
1473 for p in cl.parents(n):
1482 for p in cl.parents(n):
1474 if p != nullid:
1483 if p != nullid:
1475 knownheads[p] = 1
1484 knownheads[p] = 1
1476 knownheads = knownheads.keys()
1485 knownheads = knownheads.keys()
1477 if knownheads:
1486 if knownheads:
1478 # Now that we know what heads are known, we can compute which
1487 # Now that we know what heads are known, we can compute which
1479 # changesets are known. The recipient must know about all
1488 # changesets are known. The recipient must know about all
1480 # changesets required to reach the known heads from the null
1489 # changesets required to reach the known heads from the null
1481 # changeset.
1490 # changeset.
1482 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1491 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1483 junk = None
1492 junk = None
1484 # Transform the list into an ersatz set.
1493 # Transform the list into an ersatz set.
1485 has_cl_set = dict.fromkeys(has_cl_set)
1494 has_cl_set = dict.fromkeys(has_cl_set)
1486 else:
1495 else:
1487 # If there were no known heads, the recipient cannot be assumed to
1496 # If there were no known heads, the recipient cannot be assumed to
1488 # know about any changesets.
1497 # know about any changesets.
1489 has_cl_set = {}
1498 has_cl_set = {}
1490
1499
1491 # Make it easy to refer to self.manifest
1500 # Make it easy to refer to self.manifest
1492 mnfst = self.manifest
1501 mnfst = self.manifest
1493 # We don't know which manifests are missing yet
1502 # We don't know which manifests are missing yet
1494 msng_mnfst_set = {}
1503 msng_mnfst_set = {}
1495 # Nor do we know which filenodes are missing.
1504 # Nor do we know which filenodes are missing.
1496 msng_filenode_set = {}
1505 msng_filenode_set = {}
1497
1506
1498 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1507 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1499 junk = None
1508 junk = None
1500
1509
1501 # A changeset always belongs to itself, so the changenode lookup
1510 # A changeset always belongs to itself, so the changenode lookup
1502 # function for a changenode is identity.
1511 # function for a changenode is identity.
1503 def identity(x):
1512 def identity(x):
1504 return x
1513 return x
1505
1514
1506 # A function generating function. Sets up an environment for the
1515 # A function generating function. Sets up an environment for the
1507 # inner function.
1516 # inner function.
1508 def cmp_by_rev_func(revlog):
1517 def cmp_by_rev_func(revlog):
1509 # Compare two nodes by their revision number in the environment's
1518 # Compare two nodes by their revision number in the environment's
1510 # revision history. Since the revision number both represents the
1519 # revision history. Since the revision number both represents the
1511 # most efficient order to read the nodes in, and represents a
1520 # most efficient order to read the nodes in, and represents a
1512 # topological sorting of the nodes, this function is often useful.
1521 # topological sorting of the nodes, this function is often useful.
1513 def cmp_by_rev(a, b):
1522 def cmp_by_rev(a, b):
1514 return cmp(revlog.rev(a), revlog.rev(b))
1523 return cmp(revlog.rev(a), revlog.rev(b))
1515 return cmp_by_rev
1524 return cmp_by_rev
1516
1525
1517 # If we determine that a particular file or manifest node must be a
1526 # If we determine that a particular file or manifest node must be a
1518 # node that the recipient of the changegroup will already have, we can
1527 # node that the recipient of the changegroup will already have, we can
1519 # also assume the recipient will have all the parents. This function
1528 # also assume the recipient will have all the parents. This function
1520 # prunes them from the set of missing nodes.
1529 # prunes them from the set of missing nodes.
1521 def prune_parents(revlog, hasset, msngset):
1530 def prune_parents(revlog, hasset, msngset):
1522 haslst = hasset.keys()
1531 haslst = hasset.keys()
1523 haslst.sort(cmp_by_rev_func(revlog))
1532 haslst.sort(cmp_by_rev_func(revlog))
1524 for node in haslst:
1533 for node in haslst:
1525 parentlst = [p for p in revlog.parents(node) if p != nullid]
1534 parentlst = [p for p in revlog.parents(node) if p != nullid]
1526 while parentlst:
1535 while parentlst:
1527 n = parentlst.pop()
1536 n = parentlst.pop()
1528 if n not in hasset:
1537 if n not in hasset:
1529 hasset[n] = 1
1538 hasset[n] = 1
1530 p = [p for p in revlog.parents(n) if p != nullid]
1539 p = [p for p in revlog.parents(n) if p != nullid]
1531 parentlst.extend(p)
1540 parentlst.extend(p)
1532 for n in hasset:
1541 for n in hasset:
1533 msngset.pop(n, None)
1542 msngset.pop(n, None)
1534
1543
1535 # This is a function generating function used to set up an environment
1544 # This is a function generating function used to set up an environment
1536 # for the inner function to execute in.
1545 # for the inner function to execute in.
1537 def manifest_and_file_collector(changedfileset):
1546 def manifest_and_file_collector(changedfileset):
1538 # This is an information gathering function that gathers
1547 # This is an information gathering function that gathers
1539 # information from each changeset node that goes out as part of
1548 # information from each changeset node that goes out as part of
1540 # the changegroup. The information gathered is a list of which
1549 # the changegroup. The information gathered is a list of which
1541 # manifest nodes are potentially required (the recipient may
1550 # manifest nodes are potentially required (the recipient may
1542 # already have them) and total list of all files which were
1551 # already have them) and total list of all files which were
1543 # changed in any changeset in the changegroup.
1552 # changed in any changeset in the changegroup.
1544 #
1553 #
1545 # We also remember the first changenode we saw any manifest
1554 # We also remember the first changenode we saw any manifest
1546 # referenced by so we can later determine which changenode 'owns'
1555 # referenced by so we can later determine which changenode 'owns'
1547 # the manifest.
1556 # the manifest.
1548 def collect_manifests_and_files(clnode):
1557 def collect_manifests_and_files(clnode):
1549 c = cl.read(clnode)
1558 c = cl.read(clnode)
1550 for f in c[3]:
1559 for f in c[3]:
1551 # This is to make sure we only have one instance of each
1560 # This is to make sure we only have one instance of each
1552 # filename string for each filename.
1561 # filename string for each filename.
1553 changedfileset.setdefault(f, f)
1562 changedfileset.setdefault(f, f)
1554 msng_mnfst_set.setdefault(c[0], clnode)
1563 msng_mnfst_set.setdefault(c[0], clnode)
1555 return collect_manifests_and_files
1564 return collect_manifests_and_files
1556
1565
1557 # Figure out which manifest nodes (of the ones we think might be part
1566 # Figure out which manifest nodes (of the ones we think might be part
1558 # of the changegroup) the recipient must know about and remove them
1567 # of the changegroup) the recipient must know about and remove them
1559 # from the changegroup.
1568 # from the changegroup.
1560 def prune_manifests():
1569 def prune_manifests():
1561 has_mnfst_set = {}
1570 has_mnfst_set = {}
1562 for n in msng_mnfst_set:
1571 for n in msng_mnfst_set:
1563 # If a 'missing' manifest thinks it belongs to a changenode
1572 # If a 'missing' manifest thinks it belongs to a changenode
1564 # the recipient is assumed to have, obviously the recipient
1573 # the recipient is assumed to have, obviously the recipient
1565 # must have that manifest.
1574 # must have that manifest.
1566 linknode = cl.node(mnfst.linkrev(n))
1575 linknode = cl.node(mnfst.linkrev(n))
1567 if linknode in has_cl_set:
1576 if linknode in has_cl_set:
1568 has_mnfst_set[n] = 1
1577 has_mnfst_set[n] = 1
1569 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1578 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1570
1579
1571 # Use the information collected in collect_manifests_and_files to say
1580 # Use the information collected in collect_manifests_and_files to say
1572 # which changenode any manifestnode belongs to.
1581 # which changenode any manifestnode belongs to.
1573 def lookup_manifest_link(mnfstnode):
1582 def lookup_manifest_link(mnfstnode):
1574 return msng_mnfst_set[mnfstnode]
1583 return msng_mnfst_set[mnfstnode]
1575
1584
1576 # A function generating function that sets up the initial environment
1585 # A function generating function that sets up the initial environment
1577 # the inner function.
1586 # the inner function.
1578 def filenode_collector(changedfiles):
1587 def filenode_collector(changedfiles):
1579 next_rev = [0]
1588 next_rev = [0]
1580 # This gathers information from each manifestnode included in the
1589 # This gathers information from each manifestnode included in the
1581 # changegroup about which filenodes the manifest node references
1590 # changegroup about which filenodes the manifest node references
1582 # so we can include those in the changegroup too.
1591 # so we can include those in the changegroup too.
1583 #
1592 #
1584 # It also remembers which changenode each filenode belongs to. It
1593 # It also remembers which changenode each filenode belongs to. It
1585 # does this by assuming the a filenode belongs to the changenode
1594 # does this by assuming the a filenode belongs to the changenode
1586 # the first manifest that references it belongs to.
1595 # the first manifest that references it belongs to.
1587 def collect_msng_filenodes(mnfstnode):
1596 def collect_msng_filenodes(mnfstnode):
1588 r = mnfst.rev(mnfstnode)
1597 r = mnfst.rev(mnfstnode)
1589 if r == next_rev[0]:
1598 if r == next_rev[0]:
1590 # If the last rev we looked at was the one just previous,
1599 # If the last rev we looked at was the one just previous,
1591 # we only need to see a diff.
1600 # we only need to see a diff.
1592 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1601 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1593 # For each line in the delta
1602 # For each line in the delta
1594 for dline in delta.splitlines():
1603 for dline in delta.splitlines():
1595 # get the filename and filenode for that line
1604 # get the filename and filenode for that line
1596 f, fnode = dline.split('\0')
1605 f, fnode = dline.split('\0')
1597 fnode = bin(fnode[:40])
1606 fnode = bin(fnode[:40])
1598 f = changedfiles.get(f, None)
1607 f = changedfiles.get(f, None)
1599 # And if the file is in the list of files we care
1608 # And if the file is in the list of files we care
1600 # about.
1609 # about.
1601 if f is not None:
1610 if f is not None:
1602 # Get the changenode this manifest belongs to
1611 # Get the changenode this manifest belongs to
1603 clnode = msng_mnfst_set[mnfstnode]
1612 clnode = msng_mnfst_set[mnfstnode]
1604 # Create the set of filenodes for the file if
1613 # Create the set of filenodes for the file if
1605 # there isn't one already.
1614 # there isn't one already.
1606 ndset = msng_filenode_set.setdefault(f, {})
1615 ndset = msng_filenode_set.setdefault(f, {})
1607 # And set the filenode's changelog node to the
1616 # And set the filenode's changelog node to the
1608 # manifest's if it hasn't been set already.
1617 # manifest's if it hasn't been set already.
1609 ndset.setdefault(fnode, clnode)
1618 ndset.setdefault(fnode, clnode)
1610 else:
1619 else:
1611 # Otherwise we need a full manifest.
1620 # Otherwise we need a full manifest.
1612 m = mnfst.read(mnfstnode)
1621 m = mnfst.read(mnfstnode)
1613 # For every file in we care about.
1622 # For every file in we care about.
1614 for f in changedfiles:
1623 for f in changedfiles:
1615 fnode = m.get(f, None)
1624 fnode = m.get(f, None)
1616 # If it's in the manifest
1625 # If it's in the manifest
1617 if fnode is not None:
1626 if fnode is not None:
1618 # See comments above.
1627 # See comments above.
1619 clnode = msng_mnfst_set[mnfstnode]
1628 clnode = msng_mnfst_set[mnfstnode]
1620 ndset = msng_filenode_set.setdefault(f, {})
1629 ndset = msng_filenode_set.setdefault(f, {})
1621 ndset.setdefault(fnode, clnode)
1630 ndset.setdefault(fnode, clnode)
1622 # Remember the revision we hope to see next.
1631 # Remember the revision we hope to see next.
1623 next_rev[0] = r + 1
1632 next_rev[0] = r + 1
1624 return collect_msng_filenodes
1633 return collect_msng_filenodes
1625
1634
1626 # We have a list of filenodes we think we need for a file, lets remove
1635 # We have a list of filenodes we think we need for a file, lets remove
1627 # all those we now the recipient must have.
1636 # all those we now the recipient must have.
1628 def prune_filenodes(f, filerevlog):
1637 def prune_filenodes(f, filerevlog):
1629 msngset = msng_filenode_set[f]
1638 msngset = msng_filenode_set[f]
1630 hasset = {}
1639 hasset = {}
1631 # If a 'missing' filenode thinks it belongs to a changenode we
1640 # If a 'missing' filenode thinks it belongs to a changenode we
1632 # assume the recipient must have, then the recipient must have
1641 # assume the recipient must have, then the recipient must have
1633 # that filenode.
1642 # that filenode.
1634 for n in msngset:
1643 for n in msngset:
1635 clnode = cl.node(filerevlog.linkrev(n))
1644 clnode = cl.node(filerevlog.linkrev(n))
1636 if clnode in has_cl_set:
1645 if clnode in has_cl_set:
1637 hasset[n] = 1
1646 hasset[n] = 1
1638 prune_parents(filerevlog, hasset, msngset)
1647 prune_parents(filerevlog, hasset, msngset)
1639
1648
1640 # A function generator function that sets up the a context for the
1649 # A function generator function that sets up the a context for the
1641 # inner function.
1650 # inner function.
1642 def lookup_filenode_link_func(fname):
1651 def lookup_filenode_link_func(fname):
1643 msngset = msng_filenode_set[fname]
1652 msngset = msng_filenode_set[fname]
1644 # Lookup the changenode the filenode belongs to.
1653 # Lookup the changenode the filenode belongs to.
1645 def lookup_filenode_link(fnode):
1654 def lookup_filenode_link(fnode):
1646 return msngset[fnode]
1655 return msngset[fnode]
1647 return lookup_filenode_link
1656 return lookup_filenode_link
1648
1657
1649 # Now that we have all theses utility functions to help out and
1658 # Now that we have all theses utility functions to help out and
1650 # logically divide up the task, generate the group.
1659 # logically divide up the task, generate the group.
1651 def gengroup():
1660 def gengroup():
1652 # The set of changed files starts empty.
1661 # The set of changed files starts empty.
1653 changedfiles = {}
1662 changedfiles = {}
1654 # Create a changenode group generator that will call our functions
1663 # Create a changenode group generator that will call our functions
1655 # back to lookup the owning changenode and collect information.
1664 # back to lookup the owning changenode and collect information.
1656 group = cl.group(msng_cl_lst, identity,
1665 group = cl.group(msng_cl_lst, identity,
1657 manifest_and_file_collector(changedfiles))
1666 manifest_and_file_collector(changedfiles))
1658 for chnk in group:
1667 for chnk in group:
1659 yield chnk
1668 yield chnk
1660
1669
1661 # The list of manifests has been collected by the generator
1670 # The list of manifests has been collected by the generator
1662 # calling our functions back.
1671 # calling our functions back.
1663 prune_manifests()
1672 prune_manifests()
1664 msng_mnfst_lst = msng_mnfst_set.keys()
1673 msng_mnfst_lst = msng_mnfst_set.keys()
1665 # Sort the manifestnodes by revision number.
1674 # Sort the manifestnodes by revision number.
1666 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1675 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1667 # Create a generator for the manifestnodes that calls our lookup
1676 # Create a generator for the manifestnodes that calls our lookup
1668 # and data collection functions back.
1677 # and data collection functions back.
1669 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1678 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1670 filenode_collector(changedfiles))
1679 filenode_collector(changedfiles))
1671 for chnk in group:
1680 for chnk in group:
1672 yield chnk
1681 yield chnk
1673
1682
1674 # These are no longer needed, dereference and toss the memory for
1683 # These are no longer needed, dereference and toss the memory for
1675 # them.
1684 # them.
1676 msng_mnfst_lst = None
1685 msng_mnfst_lst = None
1677 msng_mnfst_set.clear()
1686 msng_mnfst_set.clear()
1678
1687
1679 changedfiles = changedfiles.keys()
1688 changedfiles = changedfiles.keys()
1680 changedfiles.sort()
1689 changedfiles.sort()
1681 # Go through all our files in order sorted by name.
1690 # Go through all our files in order sorted by name.
1682 for fname in changedfiles:
1691 for fname in changedfiles:
1683 filerevlog = self.file(fname)
1692 filerevlog = self.file(fname)
1684 # Toss out the filenodes that the recipient isn't really
1693 # Toss out the filenodes that the recipient isn't really
1685 # missing.
1694 # missing.
1686 if msng_filenode_set.has_key(fname):
1695 if msng_filenode_set.has_key(fname):
1687 prune_filenodes(fname, filerevlog)
1696 prune_filenodes(fname, filerevlog)
1688 msng_filenode_lst = msng_filenode_set[fname].keys()
1697 msng_filenode_lst = msng_filenode_set[fname].keys()
1689 else:
1698 else:
1690 msng_filenode_lst = []
1699 msng_filenode_lst = []
1691 # If any filenodes are left, generate the group for them,
1700 # If any filenodes are left, generate the group for them,
1692 # otherwise don't bother.
1701 # otherwise don't bother.
1693 if len(msng_filenode_lst) > 0:
1702 if len(msng_filenode_lst) > 0:
1694 yield changegroup.genchunk(fname)
1703 yield changegroup.genchunk(fname)
1695 # Sort the filenodes by their revision #
1704 # Sort the filenodes by their revision #
1696 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1705 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1697 # Create a group generator and only pass in a changenode
1706 # Create a group generator and only pass in a changenode
1698 # lookup function as we need to collect no information
1707 # lookup function as we need to collect no information
1699 # from filenodes.
1708 # from filenodes.
1700 group = filerevlog.group(msng_filenode_lst,
1709 group = filerevlog.group(msng_filenode_lst,
1701 lookup_filenode_link_func(fname))
1710 lookup_filenode_link_func(fname))
1702 for chnk in group:
1711 for chnk in group:
1703 yield chnk
1712 yield chnk
1704 if msng_filenode_set.has_key(fname):
1713 if msng_filenode_set.has_key(fname):
1705 # Don't need this anymore, toss it to free memory.
1714 # Don't need this anymore, toss it to free memory.
1706 del msng_filenode_set[fname]
1715 del msng_filenode_set[fname]
1707 # Signal that no more groups are left.
1716 # Signal that no more groups are left.
1708 yield changegroup.closechunk()
1717 yield changegroup.closechunk()
1709
1718
1710 if msng_cl_lst:
1719 if msng_cl_lst:
1711 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1720 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1712
1721
1713 return util.chunkbuffer(gengroup())
1722 return util.chunkbuffer(gengroup())
1714
1723
1715 def changegroup(self, basenodes, source):
1724 def changegroup(self, basenodes, source):
1716 """Generate a changegroup of all nodes that we have that a recipient
1725 """Generate a changegroup of all nodes that we have that a recipient
1717 doesn't.
1726 doesn't.
1718
1727
1719 This is much easier than the previous function as we can assume that
1728 This is much easier than the previous function as we can assume that
1720 the recipient has any changenode we aren't sending them."""
1729 the recipient has any changenode we aren't sending them."""
1721
1730
1722 self.hook('preoutgoing', throw=True, source=source)
1731 self.hook('preoutgoing', throw=True, source=source)
1723
1732
1724 cl = self.changelog
1733 cl = self.changelog
1725 nodes = cl.nodesbetween(basenodes, None)[0]
1734 nodes = cl.nodesbetween(basenodes, None)[0]
1726 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1735 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1727 self.changegroupinfo(nodes)
1736 self.changegroupinfo(nodes)
1728
1737
1729 def identity(x):
1738 def identity(x):
1730 return x
1739 return x
1731
1740
1732 def gennodelst(revlog):
1741 def gennodelst(revlog):
1733 for r in xrange(0, revlog.count()):
1742 for r in xrange(0, revlog.count()):
1734 n = revlog.node(r)
1743 n = revlog.node(r)
1735 if revlog.linkrev(n) in revset:
1744 if revlog.linkrev(n) in revset:
1736 yield n
1745 yield n
1737
1746
1738 def changed_file_collector(changedfileset):
1747 def changed_file_collector(changedfileset):
1739 def collect_changed_files(clnode):
1748 def collect_changed_files(clnode):
1740 c = cl.read(clnode)
1749 c = cl.read(clnode)
1741 for fname in c[3]:
1750 for fname in c[3]:
1742 changedfileset[fname] = 1
1751 changedfileset[fname] = 1
1743 return collect_changed_files
1752 return collect_changed_files
1744
1753
1745 def lookuprevlink_func(revlog):
1754 def lookuprevlink_func(revlog):
1746 def lookuprevlink(n):
1755 def lookuprevlink(n):
1747 return cl.node(revlog.linkrev(n))
1756 return cl.node(revlog.linkrev(n))
1748 return lookuprevlink
1757 return lookuprevlink
1749
1758
1750 def gengroup():
1759 def gengroup():
1751 # construct a list of all changed files
1760 # construct a list of all changed files
1752 changedfiles = {}
1761 changedfiles = {}
1753
1762
1754 for chnk in cl.group(nodes, identity,
1763 for chnk in cl.group(nodes, identity,
1755 changed_file_collector(changedfiles)):
1764 changed_file_collector(changedfiles)):
1756 yield chnk
1765 yield chnk
1757 changedfiles = changedfiles.keys()
1766 changedfiles = changedfiles.keys()
1758 changedfiles.sort()
1767 changedfiles.sort()
1759
1768
1760 mnfst = self.manifest
1769 mnfst = self.manifest
1761 nodeiter = gennodelst(mnfst)
1770 nodeiter = gennodelst(mnfst)
1762 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1771 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1763 yield chnk
1772 yield chnk
1764
1773
1765 for fname in changedfiles:
1774 for fname in changedfiles:
1766 filerevlog = self.file(fname)
1775 filerevlog = self.file(fname)
1767 nodeiter = gennodelst(filerevlog)
1776 nodeiter = gennodelst(filerevlog)
1768 nodeiter = list(nodeiter)
1777 nodeiter = list(nodeiter)
1769 if nodeiter:
1778 if nodeiter:
1770 yield changegroup.genchunk(fname)
1779 yield changegroup.genchunk(fname)
1771 lookup = lookuprevlink_func(filerevlog)
1780 lookup = lookuprevlink_func(filerevlog)
1772 for chnk in filerevlog.group(nodeiter, lookup):
1781 for chnk in filerevlog.group(nodeiter, lookup):
1773 yield chnk
1782 yield chnk
1774
1783
1775 yield changegroup.closechunk()
1784 yield changegroup.closechunk()
1776
1785
1777 if nodes:
1786 if nodes:
1778 self.hook('outgoing', node=hex(nodes[0]), source=source)
1787 self.hook('outgoing', node=hex(nodes[0]), source=source)
1779
1788
1780 return util.chunkbuffer(gengroup())
1789 return util.chunkbuffer(gengroup())
1781
1790
1782 def addchangegroup(self, source, srctype, url):
1791 def addchangegroup(self, source, srctype, url):
1783 """add changegroup to repo.
1792 """add changegroup to repo.
1784
1793
1785 return values:
1794 return values:
1786 - nothing changed or no source: 0
1795 - nothing changed or no source: 0
1787 - more heads than before: 1+added heads (2..n)
1796 - more heads than before: 1+added heads (2..n)
1788 - less heads than before: -1-removed heads (-2..-n)
1797 - less heads than before: -1-removed heads (-2..-n)
1789 - number of heads stays the same: 1
1798 - number of heads stays the same: 1
1790 """
1799 """
1791 def csmap(x):
1800 def csmap(x):
1792 self.ui.debug(_("add changeset %s\n") % short(x))
1801 self.ui.debug(_("add changeset %s\n") % short(x))
1793 return cl.count()
1802 return cl.count()
1794
1803
1795 def revmap(x):
1804 def revmap(x):
1796 return cl.rev(x)
1805 return cl.rev(x)
1797
1806
1798 if not source:
1807 if not source:
1799 return 0
1808 return 0
1800
1809
1801 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1810 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1802
1811
1803 changesets = files = revisions = 0
1812 changesets = files = revisions = 0
1804
1813
1805 tr = self.transaction()
1814 tr = self.transaction()
1806
1815
1807 # write changelog data to temp files so concurrent readers will not see
1816 # write changelog data to temp files so concurrent readers will not see
1808 # inconsistent view
1817 # inconsistent view
1809 cl = self.changelog
1818 cl = self.changelog
1810 cl.delayupdate()
1819 cl.delayupdate()
1811 oldheads = len(cl.heads())
1820 oldheads = len(cl.heads())
1812
1821
1813 # pull off the changeset group
1822 # pull off the changeset group
1814 self.ui.status(_("adding changesets\n"))
1823 self.ui.status(_("adding changesets\n"))
1815 cor = cl.count() - 1
1824 cor = cl.count() - 1
1816 chunkiter = changegroup.chunkiter(source)
1825 chunkiter = changegroup.chunkiter(source)
1817 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1826 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1818 raise util.Abort(_("received changelog group is empty"))
1827 raise util.Abort(_("received changelog group is empty"))
1819 cnr = cl.count() - 1
1828 cnr = cl.count() - 1
1820 changesets = cnr - cor
1829 changesets = cnr - cor
1821
1830
1822 # pull off the manifest group
1831 # pull off the manifest group
1823 self.ui.status(_("adding manifests\n"))
1832 self.ui.status(_("adding manifests\n"))
1824 chunkiter = changegroup.chunkiter(source)
1833 chunkiter = changegroup.chunkiter(source)
1825 # no need to check for empty manifest group here:
1834 # no need to check for empty manifest group here:
1826 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1835 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1827 # no new manifest will be created and the manifest group will
1836 # no new manifest will be created and the manifest group will
1828 # be empty during the pull
1837 # be empty during the pull
1829 self.manifest.addgroup(chunkiter, revmap, tr)
1838 self.manifest.addgroup(chunkiter, revmap, tr)
1830
1839
1831 # process the files
1840 # process the files
1832 self.ui.status(_("adding file changes\n"))
1841 self.ui.status(_("adding file changes\n"))
1833 while 1:
1842 while 1:
1834 f = changegroup.getchunk(source)
1843 f = changegroup.getchunk(source)
1835 if not f:
1844 if not f:
1836 break
1845 break
1837 self.ui.debug(_("adding %s revisions\n") % f)
1846 self.ui.debug(_("adding %s revisions\n") % f)
1838 fl = self.file(f)
1847 fl = self.file(f)
1839 o = fl.count()
1848 o = fl.count()
1840 chunkiter = changegroup.chunkiter(source)
1849 chunkiter = changegroup.chunkiter(source)
1841 if fl.addgroup(chunkiter, revmap, tr) is None:
1850 if fl.addgroup(chunkiter, revmap, tr) is None:
1842 raise util.Abort(_("received file revlog group is empty"))
1851 raise util.Abort(_("received file revlog group is empty"))
1843 revisions += fl.count() - o
1852 revisions += fl.count() - o
1844 files += 1
1853 files += 1
1845
1854
1846 # make changelog see real files again
1855 # make changelog see real files again
1847 cl.finalize(tr)
1856 cl.finalize(tr)
1848
1857
1849 newheads = len(self.changelog.heads())
1858 newheads = len(self.changelog.heads())
1850 heads = ""
1859 heads = ""
1851 if oldheads and newheads != oldheads:
1860 if oldheads and newheads != oldheads:
1852 heads = _(" (%+d heads)") % (newheads - oldheads)
1861 heads = _(" (%+d heads)") % (newheads - oldheads)
1853
1862
1854 self.ui.status(_("added %d changesets"
1863 self.ui.status(_("added %d changesets"
1855 " with %d changes to %d files%s\n")
1864 " with %d changes to %d files%s\n")
1856 % (changesets, revisions, files, heads))
1865 % (changesets, revisions, files, heads))
1857
1866
1858 if changesets > 0:
1867 if changesets > 0:
1859 self.hook('pretxnchangegroup', throw=True,
1868 self.hook('pretxnchangegroup', throw=True,
1860 node=hex(self.changelog.node(cor+1)), source=srctype,
1869 node=hex(self.changelog.node(cor+1)), source=srctype,
1861 url=url)
1870 url=url)
1862
1871
1863 tr.close()
1872 tr.close()
1864
1873
1865 if changesets > 0:
1874 if changesets > 0:
1866 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1875 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1867 source=srctype, url=url)
1876 source=srctype, url=url)
1868
1877
1869 for i in xrange(cor + 1, cnr + 1):
1878 for i in xrange(cor + 1, cnr + 1):
1870 self.hook("incoming", node=hex(self.changelog.node(i)),
1879 self.hook("incoming", node=hex(self.changelog.node(i)),
1871 source=srctype, url=url)
1880 source=srctype, url=url)
1872
1881
1873 # never return 0 here:
1882 # never return 0 here:
1874 if newheads < oldheads:
1883 if newheads < oldheads:
1875 return newheads - oldheads - 1
1884 return newheads - oldheads - 1
1876 else:
1885 else:
1877 return newheads - oldheads + 1
1886 return newheads - oldheads + 1
1878
1887
1879
1888
1880 def stream_in(self, remote):
1889 def stream_in(self, remote):
1881 fp = remote.stream_out()
1890 fp = remote.stream_out()
1882 l = fp.readline()
1891 l = fp.readline()
1883 try:
1892 try:
1884 resp = int(l)
1893 resp = int(l)
1885 except ValueError:
1894 except ValueError:
1886 raise util.UnexpectedOutput(
1895 raise util.UnexpectedOutput(
1887 _('Unexpected response from remote server:'), l)
1896 _('Unexpected response from remote server:'), l)
1888 if resp == 1:
1897 if resp == 1:
1889 raise util.Abort(_('operation forbidden by server'))
1898 raise util.Abort(_('operation forbidden by server'))
1890 elif resp == 2:
1899 elif resp == 2:
1891 raise util.Abort(_('locking the remote repository failed'))
1900 raise util.Abort(_('locking the remote repository failed'))
1892 elif resp != 0:
1901 elif resp != 0:
1893 raise util.Abort(_('the server sent an unknown error code'))
1902 raise util.Abort(_('the server sent an unknown error code'))
1894 self.ui.status(_('streaming all changes\n'))
1903 self.ui.status(_('streaming all changes\n'))
1895 l = fp.readline()
1904 l = fp.readline()
1896 try:
1905 try:
1897 total_files, total_bytes = map(int, l.split(' ', 1))
1906 total_files, total_bytes = map(int, l.split(' ', 1))
1898 except ValueError, TypeError:
1907 except ValueError, TypeError:
1899 raise util.UnexpectedOutput(
1908 raise util.UnexpectedOutput(
1900 _('Unexpected response from remote server:'), l)
1909 _('Unexpected response from remote server:'), l)
1901 self.ui.status(_('%d files to transfer, %s of data\n') %
1910 self.ui.status(_('%d files to transfer, %s of data\n') %
1902 (total_files, util.bytecount(total_bytes)))
1911 (total_files, util.bytecount(total_bytes)))
1903 start = time.time()
1912 start = time.time()
1904 for i in xrange(total_files):
1913 for i in xrange(total_files):
1905 # XXX doesn't support '\n' or '\r' in filenames
1914 # XXX doesn't support '\n' or '\r' in filenames
1906 l = fp.readline()
1915 l = fp.readline()
1907 try:
1916 try:
1908 name, size = l.split('\0', 1)
1917 name, size = l.split('\0', 1)
1909 size = int(size)
1918 size = int(size)
1910 except ValueError, TypeError:
1919 except ValueError, TypeError:
1911 raise util.UnexpectedOutput(
1920 raise util.UnexpectedOutput(
1912 _('Unexpected response from remote server:'), l)
1921 _('Unexpected response from remote server:'), l)
1913 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1922 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1914 ofp = self.sopener(name, 'w')
1923 ofp = self.sopener(name, 'w')
1915 for chunk in util.filechunkiter(fp, limit=size):
1924 for chunk in util.filechunkiter(fp, limit=size):
1916 ofp.write(chunk)
1925 ofp.write(chunk)
1917 ofp.close()
1926 ofp.close()
1918 elapsed = time.time() - start
1927 elapsed = time.time() - start
1919 if elapsed <= 0:
1928 if elapsed <= 0:
1920 elapsed = 0.001
1929 elapsed = 0.001
1921 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1930 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1922 (util.bytecount(total_bytes), elapsed,
1931 (util.bytecount(total_bytes), elapsed,
1923 util.bytecount(total_bytes / elapsed)))
1932 util.bytecount(total_bytes / elapsed)))
1924 self.reload()
1933 self.reload()
1925 return len(self.heads()) + 1
1934 return len(self.heads()) + 1
1926
1935
1927 def clone(self, remote, heads=[], stream=False):
1936 def clone(self, remote, heads=[], stream=False):
1928 '''clone remote repository.
1937 '''clone remote repository.
1929
1938
1930 keyword arguments:
1939 keyword arguments:
1931 heads: list of revs to clone (forces use of pull)
1940 heads: list of revs to clone (forces use of pull)
1932 stream: use streaming clone if possible'''
1941 stream: use streaming clone if possible'''
1933
1942
1934 # now, all clients that can request uncompressed clones can
1943 # now, all clients that can request uncompressed clones can
1935 # read repo formats supported by all servers that can serve
1944 # read repo formats supported by all servers that can serve
1936 # them.
1945 # them.
1937
1946
1938 # if revlog format changes, client will have to check version
1947 # if revlog format changes, client will have to check version
1939 # and format flags on "stream" capability, and use
1948 # and format flags on "stream" capability, and use
1940 # uncompressed only if compatible.
1949 # uncompressed only if compatible.
1941
1950
1942 if stream and not heads and remote.capable('stream'):
1951 if stream and not heads and remote.capable('stream'):
1943 return self.stream_in(remote)
1952 return self.stream_in(remote)
1944 return self.pull(remote, heads)
1953 return self.pull(remote, heads)
1945
1954
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in files.

    The pairs are copied into fresh tuples up front so the closure keeps
    no reference back to the caller's list.
    """
    renamefiles = [tuple(pair) for pair in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
1953
1962
def instance(ui, path, create):
    """Repository factory: open/create a local repo at a file path/URL."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1956
1965
def islocal(path):
    """Every repository handled by this module lives on local disk."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now