##// END OF EJS Templates
Break core of repo.tag into dirstate/hook-free repo._tag for convert-repo
Brendan Cully -
r4118:35b39097 default
parent child Browse files
Show More
@@ -1,1907 +1,1920 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
class localrepository(repo.repository):
    """A Mercurial repository stored on the local filesystem."""
    # wire-protocol capabilities advertised to clients
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk requirements this version of the code understands
    supported = ('revlogv1', 'store')

    def __del__(self):
        # drop the reference to any pending transaction on teardown
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, search upward from the cwd for a .hg directory.
        Raises repo.RepoError if no repository is found, if create is
        requested for an existing repository, or if the repository uses
        a feature this version does not support.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the cwd until a .hg directory is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: old-style repository, no requirements
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # old layout: store files live directly under .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a missing per-repository hgrc is fine
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-filled caches; None/empty means "not computed yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink probe; only meaningful if the filesystem supports links
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

130 def url(self):
130 def url(self):
131 return 'file:' + self.root
131 return 'file:' + self.root
132
132
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name is `name` or `name.*`.

        args are passed to in-process hooks as keyword arguments and to
        shell hooks as HG_* environment variables.  If throw is true, a
        failing hook raises util.Abort; otherwise only a warning is
        printed.  Returns a true value if any hook failed.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # funcname is dotted "module.attr..."; import the module
                # and walk down the attribute path
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: keyword args become HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # collect matching hooks from config and run them in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                # "python:mod.func" names an in-process hook
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r

    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None):
        """Core of tag(): record the tag and (if not local) commit it.

        When parent is given (e.g. by converters), the .hgtags contents
        are taken from that revision instead of the dirstate, and the
        commit is made with p1=parent.  Returns the node of the tagging
        commit, or None for local tags.
        """
        # with no explicit parent we operate on the working directory
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            # base .hgtags on the requested parent revision's copy
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to commit a tag on top of uncommitted .hgtags changes;
        # status()[:5] covers modified/added/removed/deleted/unknown
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # the dirstate/hook-aware work happens in _tag
        self._tag(name, node, message, local, user, date)

264 def tags(self):
277 def tags(self):
265 '''return a mapping of tag to node'''
278 '''return a mapping of tag to node'''
266 if not self.tagscache:
279 if not self.tagscache:
267 self.tagscache = {}
280 self.tagscache = {}
268
281
269 def parsetag(line, context):
282 def parsetag(line, context):
270 if not line:
283 if not line:
271 return
284 return
272 s = l.split(" ", 1)
285 s = l.split(" ", 1)
273 if len(s) != 2:
286 if len(s) != 2:
274 self.ui.warn(_("%s: cannot parse entry\n") % context)
287 self.ui.warn(_("%s: cannot parse entry\n") % context)
275 return
288 return
276 node, key = s
289 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
290 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
291 try:
279 bin_n = bin(node)
292 bin_n = bin(node)
280 except TypeError:
293 except TypeError:
281 self.ui.warn(_("%s: node '%s' is not well formed\n") %
294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
282 (context, node))
295 (context, node))
283 return
296 return
284 if bin_n not in self.changelog.nodemap:
297 if bin_n not in self.changelog.nodemap:
285 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
286 (context, key))
299 (context, key))
287 return
300 return
288 self.tagscache[key] = bin_n
301 self.tagscache[key] = bin_n
289
302
290 # read the tags file from each head, ending with the tip,
303 # read the tags file from each head, ending with the tip,
291 # and add each tag found to the map, with "newer" ones
304 # and add each tag found to the map, with "newer" ones
292 # taking precedence
305 # taking precedence
293 f = None
306 f = None
294 for rev, node, fnode in self._hgtagsnodes():
307 for rev, node, fnode in self._hgtagsnodes():
295 f = (f and f.filectx(fnode) or
308 f = (f and f.filectx(fnode) or
296 self.filectx('.hgtags', fileid=fnode))
309 self.filectx('.hgtags', fileid=fnode))
297 count = 0
310 count = 0
298 for l in f.data().splitlines():
311 for l in f.data().splitlines():
299 count += 1
312 count += 1
300 parsetag(l, _("%s, line %d") % (str(f), count))
313 parsetag(l, _("%s, line %d") % (str(f), count))
301
314
302 try:
315 try:
303 f = self.opener("localtags")
316 f = self.opener("localtags")
304 count = 0
317 count = 0
305 for l in f:
318 for l in f:
306 # localtags are stored in the local character set
319 # localtags are stored in the local character set
307 # while the internal tag table is stored in UTF-8
320 # while the internal tag table is stored in UTF-8
308 l = util.fromlocal(l)
321 l = util.fromlocal(l)
309 count += 1
322 count += 1
310 parsetag(l, _("localtags, line %d") % count)
323 parsetag(l, _("localtags, line %d") % count)
311 except IOError:
324 except IOError:
312 pass
325 pass
313
326
314 self.tagscache['tip'] = self.changelog.tip()
327 self.tagscache['tip'] = self.changelog.tip()
315
328
316 return self.tagscache
329 return self.tagscache
317
330
318 def _hgtagsnodes(self):
331 def _hgtagsnodes(self):
319 heads = self.heads()
332 heads = self.heads()
320 heads.reverse()
333 heads.reverse()
321 last = {}
334 last = {}
322 ret = []
335 ret = []
323 for node in heads:
336 for node in heads:
324 c = self.changectx(node)
337 c = self.changectx(node)
325 rev = c.rev()
338 rev = c.rev()
326 try:
339 try:
327 fnode = c.filenode('.hgtags')
340 fnode = c.filenode('.hgtags')
328 except revlog.LookupError:
341 except revlog.LookupError:
329 continue
342 continue
330 ret.append((rev, node, fnode))
343 ret.append((rev, node, fnode))
331 if fnode in last:
344 if fnode in last:
332 ret[last[fnode]] = None
345 ret[last[fnode]] = None
333 last[fnode] = len(ret) - 1
346 last[fnode] = len(ret) - 1
334 return [item for item in ret if item]
347 return [item for item in ret if item]
335
348
336 def tagslist(self):
349 def tagslist(self):
337 '''return a list of tags ordered by revision'''
350 '''return a list of tags ordered by revision'''
338 l = []
351 l = []
339 for t, n in self.tags().items():
352 for t, n in self.tags().items():
340 try:
353 try:
341 r = self.changelog.rev(n)
354 r = self.changelog.rev(n)
342 except:
355 except:
343 r = -2 # sort to the beginning of the list if unknown
356 r = -2 # sort to the beginning of the list if unknown
344 l.append((r, t, n))
357 l.append((r, t, n))
345 l.sort()
358 l.sort()
346 return [(t, n) for r, t, n in l]
359 return [(t, n) for r, t, n in l]
347
360
348 def nodetags(self, node):
361 def nodetags(self, node):
349 '''return the tags associated with a node'''
362 '''return the tags associated with a node'''
350 if not self.nodetagscache:
363 if not self.nodetagscache:
351 self.nodetagscache = {}
364 self.nodetagscache = {}
352 for t, n in self.tags().items():
365 for t, n in self.tags().items():
353 self.nodetagscache.setdefault(n, []).append(t)
366 self.nodetagscache.setdefault(n, []).append(t)
354 return self.nodetagscache.get(node, [])
367 return self.nodetagscache.get(node, [])
355
368
    def _branchtags(self):
        """Return the branch-name -> node map, refreshing and
        rewriting the on-disk cache when it is out of date."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: fold in the missing revisions, persist
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

366 def branchtags(self):
379 def branchtags(self):
367 if self.branchcache is not None:
380 if self.branchcache is not None:
368 return self.branchcache
381 return self.branchcache
369
382
370 self.branchcache = {} # avoid recursion in changectx
383 self.branchcache = {} # avoid recursion in changectx
371 partial = self._branchtags()
384 partial = self._branchtags()
372
385
373 # the branch cache is stored on disk as UTF-8, but in the local
386 # the branch cache is stored on disk as UTF-8, but in the local
374 # charset internally
387 # charset internally
375 for k, v in partial.items():
388 for k, v in partial.items():
376 self.branchcache[util.tolocal(k)] = v
389 self.branchcache[util.tolocal(k)] = v
377 return self.branchcache
390 return self.branchcache
378
391
    def _readbranchcache(self):
        """Read .hg/branches.cache.

        Returns (partial, last, lrev): a branch-name -> node map plus
        the tip node/rev the cache was valid for.  Any problem reading
        or validating the cache yields ({}, nullid, nullrev) so the
        caller rebuilds from scratch.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # header line is "<tip-hex> <tip-rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines are "<node-hex> <branch-name>"
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any IO/parse error just means "no usable cache"
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

403 def _writebranchcache(self, branches, tip, tiprev):
416 def _writebranchcache(self, branches, tip, tiprev):
404 try:
417 try:
405 f = self.opener("branches.cache", "w")
418 f = self.opener("branches.cache", "w")
406 f.write("%s %s\n" % (hex(tip), tiprev))
419 f.write("%s %s\n" % (hex(tip), tiprev))
407 for label, node in branches.iteritems():
420 for label, node in branches.iteritems():
408 f.write("%s %s\n" % (hex(node), label))
421 f.write("%s %s\n" % (hex(node), label))
409 except IOError:
422 except IOError:
410 pass
423 pass
411
424
412 def _updatebranchcache(self, partial, start, end):
425 def _updatebranchcache(self, partial, start, end):
413 for r in xrange(start, end):
426 for r in xrange(start, end):
414 c = self.changectx(r)
427 c = self.changectx(r)
415 b = c.branch()
428 b = c.branch()
416 if b:
429 if b:
417 partial[b] = c.node()
430 partial[b] = c.node()
418
431
    def lookup(self, key):
        """Resolve key to a changelog node.

        Tried in order: '.' (first dirstate parent), 'null', exact
        rev/node match, tag name, branch name, then unambiguous hex
        prefix.  Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

438 def dev(self):
451 def dev(self):
439 return os.lstat(self.path).st_dev
452 return os.lstat(self.path).st_dev
440
453
    def local(self):
        # this is a local on-disk repository, not a remote peer
        return True

    def join(self, f):
        """Return the path of f inside the .hg directory."""
        return os.path.join(self.path, f)

    def sjoin(self, f):
        """Return the path of f inside the store, filename-encoded."""
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        """Return the working-directory path of f."""
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        """Return a change context for changeid."""
        return context.changectx(self, changeid)

    def workingctx(self):
        """Return a context for the working directory."""
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # omit a null second parent
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)

489 def _filter(self, filter, filename, data):
502 def _filter(self, filter, filename, data):
490 if filter not in self.filterpats:
503 if filter not in self.filterpats:
491 l = []
504 l = []
492 for pat, cmd in self.ui.configitems(filter):
505 for pat, cmd in self.ui.configitems(filter):
493 mf = util.matcher(self.root, "", [pat], [], [])[1]
506 mf = util.matcher(self.root, "", [pat], [], [])[1]
494 l.append((mf, cmd))
507 l.append((mf, cmd))
495 self.filterpats[filter] = l
508 self.filterpats[filter] = l
496
509
497 for mf, cmd in self.filterpats[filter]:
510 for mf, cmd in self.filterpats[filter]:
498 if mf(filename):
511 if mf(filename):
499 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
500 data = util.filter(data, cmd)
513 data = util.filter(data, cmd)
501 break
514 break
502
515
503 return data
516 return data
504
517
    def wread(self, filename):
        """Read filename from the working directory (a symlink yields
        its target) and apply configured "encode" filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        "decode" filters first.

        'l' in flags creates a symlink whose target is data; otherwise
        a regular file is written, with 'x' in flags controlling the
        executable bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            try:
                # remove any existing file so the symlink can be created
                os.unlink(self.wjoin(filename))
            except OSError:
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # replace an existing symlink with a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

    def wwritedata(self, filename, data):
        """Return data as it would be written to the working
        directory, i.e. after "decode" filtering."""
        return self._filter("decode", filename, data)

    def transaction(self):
        """Return a transaction handle for this repository.

        If a transaction is already running, return a nested handle so
        the outer transaction keeps control of commit/rollback.
        Otherwise snapshot the dirstate for rollback and open a new
        journal-backed transaction; when it closes, the journal files
        are renamed to undo files (see aftertrans).
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nest inside the already-running transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on successful close, journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
551
564
552 def recover(self):
565 def recover(self):
553 l = self.lock()
566 l = self.lock()
554 if os.path.exists(self.sjoin("journal")):
567 if os.path.exists(self.sjoin("journal")):
555 self.ui.status(_("rolling back interrupted transaction\n"))
568 self.ui.status(_("rolling back interrupted transaction\n"))
556 transaction.rollback(self.sopener, self.sjoin("journal"))
569 transaction.rollback(self.sopener, self.sjoin("journal"))
557 self.reload()
570 self.reload()
558 return True
571 return True
559 else:
572 else:
560 self.ui.warn(_("no interrupted transaction available\n"))
573 self.ui.warn(_("no interrupted transaction available\n"))
561 return False
574 return False
562
575
563 def rollback(self, wlock=None):
576 def rollback(self, wlock=None):
564 if not wlock:
577 if not wlock:
565 wlock = self.wlock()
578 wlock = self.wlock()
566 l = self.lock()
579 l = self.lock()
567 if os.path.exists(self.sjoin("undo")):
580 if os.path.exists(self.sjoin("undo")):
568 self.ui.status(_("rolling back last transaction\n"))
581 self.ui.status(_("rolling back last transaction\n"))
569 transaction.rollback(self.sopener, self.sjoin("undo"))
582 transaction.rollback(self.sopener, self.sjoin("undo"))
570 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
583 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
571 self.reload()
584 self.reload()
572 self.wreload()
585 self.wreload()
573 else:
586 else:
574 self.ui.warn(_("no rollback information available\n"))
587 self.ui.warn(_("no rollback information available\n"))
575
588
576 def wreload(self):
589 def wreload(self):
577 self.dirstate.read()
590 self.dirstate.read()
578
591
579 def reload(self):
592 def reload(self):
580 self.changelog.load()
593 self.changelog.load()
581 self.manifest.load()
594 self.manifest.load()
582 self.tagscache = None
595 self.tagscache = None
583 self.nodetagscache = None
596 self.nodetagscache = None
584
597
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname and return the lock object.

        wait: if true, block (up to ui.timeout seconds, default 600)
        when the lock is already held; otherwise re-raise
        lock.LockHeld immediately.
        releasefn: callback invoked when the lock is released.
        acquirefn: callback invoked once the lock has been acquired.
        desc: human-readable description used in the wait message.
        """
        try:
            # first try a non-blocking acquire (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
600
613
601 def lock(self, wait=1):
614 def lock(self, wait=1):
602 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
615 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
603 desc=_('repository %s') % self.origroot)
616 desc=_('repository %s') % self.origroot)
604
617
605 def wlock(self, wait=1):
618 def wlock(self, wait=1):
606 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
619 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
607 self.wreload,
620 self.wreload,
608 desc=_('working directory of %s') % self.origroot)
621 desc=_('working directory of %s') % self.origroot)
609
622
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: name of the file being committed
        manifest1, manifest2: manifests of the two commit parents
        linkrev: changelog revision the new filelog entry will link to
        transaction: active transaction to journal writes to
        changelist: fn is appended to this list only when a new
        filelog revision is actually created

        Returns the filelog node for fn; if the file is unmodified
        from the first parent, the existing node is returned and
        changelist is left untouched.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # filelog parents, taken from the two manifests
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # the copy source acts as the parent; fp1 itself is cleared
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
669
682
670 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
683 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
671 if p1 is None:
684 if p1 is None:
672 p1, p2 = self.dirstate.parents()
685 p1, p2 = self.dirstate.parents()
673 return self.commit(files=files, text=text, user=user, date=date,
686 return self.commit(files=files, text=text, user=user, date=date,
674 p1=p1, p2=p2, wlock=wlock, extra=extra)
687 p1=p1, p2=p2, wlock=wlock, extra=extra)
675
688
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset from the working directory (or, for
        rawcommit, from an explicit file list and parents).

        files: limit the commit to these files (default: all changes)
        text: commit message; an editor is run if empty or force_editor
        match: filter used when files is not given
        force: allow a commit even when nothing changed
        lock/wlock: pre-acquired locks, taken here if not supplied
        p1, p2: explicit parents (rawcommit mode); p1 is None means
        use the dirstate parents and update the dirstate afterwards
        extra: extra changeset metadata (copied, never mutated)

        Returns the new changeset node, or None when nothing changed
        or the edited commit message came back empty.
        NOTE: the 'lock' parameter shadows the module-level lock import
        within this function.
        """

        commit = []
        remove = []
        changed = []
        # p1 given explicitly means rawcommit: bypass the dirstate
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already sits on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a branch-name change alone is enough to allow a commit
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: an unreadable file is treated as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
824
837
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        node: walk the manifest of that changeset instead of the
        working directory
        files: explicit file/directory names to look for; any that
        never match a manifest entry are reported via badmatch or a
        warning
        '''

        if node:
            # fdict tracks explicitly requested names not yet seen
            # in the manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # break right after deleting, so the Py2 dict
                        # iteration is never resumed after mutation
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left was requested but absent from the rev
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: delegate to the dirstate's directory walk
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
858
871
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are true.
        """

        def fcmp(fn, mf):
            # true if working-dir contents of fn differ from mf's version
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # best effort: status works without the lock, but
                    # holding it lets us record clean files below
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # cache the clean verdict in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # "" in mf2 marks a working-dir file whose hash is
                    # unknown; fall back to a content compare
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 was not present in mf2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
957
970
958 def add(self, list, wlock=None):
971 def add(self, list, wlock=None):
959 if not wlock:
972 if not wlock:
960 wlock = self.wlock()
973 wlock = self.wlock()
961 for f in list:
974 for f in list:
962 p = self.wjoin(f)
975 p = self.wjoin(f)
963 islink = os.path.islink(p)
976 islink = os.path.islink(p)
964 if not islink and not os.path.exists(p):
977 if not islink and not os.path.exists(p):
965 self.ui.warn(_("%s does not exist!\n") % f)
978 self.ui.warn(_("%s does not exist!\n") % f)
966 elif not islink and not os.path.isfile(p):
979 elif not islink and not os.path.isfile(p):
967 self.ui.warn(_("%s not added: only files and symlinks "
980 self.ui.warn(_("%s not added: only files and symlinks "
968 "supported currently\n") % f)
981 "supported currently\n") % f)
969 elif self.dirstate.state(f) in 'an':
982 elif self.dirstate.state(f) in 'an':
970 self.ui.warn(_("%s already tracked!\n") % f)
983 self.ui.warn(_("%s already tracked!\n") % f)
971 else:
984 else:
972 self.dirstate.update([f], "a")
985 self.dirstate.update([f], "a")
973
986
974 def forget(self, list, wlock=None):
987 def forget(self, list, wlock=None):
975 if not wlock:
988 if not wlock:
976 wlock = self.wlock()
989 wlock = self.wlock()
977 for f in list:
990 for f in list:
978 if self.dirstate.state(f) not in 'ai':
991 if self.dirstate.state(f) not in 'ai':
979 self.ui.warn(_("%s not added!\n") % f)
992 self.ui.warn(_("%s not added!\n") % f)
980 else:
993 else:
981 self.dirstate.forget([f])
994 self.dirstate.forget([f])
982
995
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit.

        unlink: when true, delete the files from the working directory
        first (files already missing are ignored).

        A file that still exists in the working directory (it must be
        gone before it can be marked removed), or that is untracked,
        is skipped with a warning; a file that was only added is
        simply forgotten.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is real
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1003
1016
1004 def undelete(self, list, wlock=None):
1017 def undelete(self, list, wlock=None):
1005 p = self.dirstate.parents()[0]
1018 p = self.dirstate.parents()[0]
1006 mn = self.changelog.read(p)[0]
1019 mn = self.changelog.read(p)[0]
1007 m = self.manifest.read(mn)
1020 m = self.manifest.read(mn)
1008 if not wlock:
1021 if not wlock:
1009 wlock = self.wlock()
1022 wlock = self.wlock()
1010 for f in list:
1023 for f in list:
1011 if self.dirstate.state(f) not in "r":
1024 if self.dirstate.state(f) not in "r":
1012 self.ui.warn("%s not removed!\n" % f)
1025 self.ui.warn("%s not removed!\n" % f)
1013 else:
1026 else:
1014 t = self.file(f).read(m[f])
1027 t = self.file(f).read(m[f])
1015 self.wwrite(f, t, m.flags(f))
1028 self.wwrite(f, t, m.flags(f))
1016 self.dirstate.update([f], "n")
1029 self.dirstate.update([f], "n")
1017
1030
1018 def copy(self, source, dest, wlock=None):
1031 def copy(self, source, dest, wlock=None):
1019 p = self.wjoin(dest)
1032 p = self.wjoin(dest)
1020 if not os.path.exists(p):
1033 if not os.path.exists(p):
1021 self.ui.warn(_("%s does not exist!\n") % dest)
1034 self.ui.warn(_("%s does not exist!\n") % dest)
1022 elif not os.path.isfile(p):
1035 elif not os.path.isfile(p):
1023 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1036 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1024 else:
1037 else:
1025 if not wlock:
1038 if not wlock:
1026 wlock = self.wlock()
1039 wlock = self.wlock()
1027 if self.dirstate.state(dest) == '?':
1040 if self.dirstate.state(dest) == '?':
1028 self.dirstate.update([dest], "a")
1041 self.dirstate.update([dest], "a")
1029 self.dirstate.copy(source, dest)
1042 self.dirstate.copy(source, dest)
1030
1043
1031 def heads(self, start=None):
1044 def heads(self, start=None):
1032 heads = self.changelog.heads(start)
1045 heads = self.changelog.heads(start)
1033 # sort the output in rev descending order
1046 # sort the output in rev descending order
1034 heads = [(-self.changelog.rev(h), h) for h in heads]
1047 heads = [(-self.changelog.rev(h), h) for h in heads]
1035 heads.sort()
1048 heads.sort()
1036 return [n for (r, n) in heads]
1049 return [n for (r, n) in heads]
1037
1050
1038 def branches(self, nodes):
1051 def branches(self, nodes):
1039 if not nodes:
1052 if not nodes:
1040 nodes = [self.changelog.tip()]
1053 nodes = [self.changelog.tip()]
1041 b = []
1054 b = []
1042 for n in nodes:
1055 for n in nodes:
1043 t = n
1056 t = n
1044 while 1:
1057 while 1:
1045 p = self.changelog.parents(n)
1058 p = self.changelog.parents(n)
1046 if p[1] != nullid or p[0] == nullid:
1059 if p[1] != nullid or p[0] == nullid:
1047 b.append((t, n, p[0], p[1]))
1060 b.append((t, n, p[0], p[1]))
1048 break
1061 break
1049 n = p[0]
1062 n = p[0]
1050 return b
1063 return b
1051
1064
1052 def between(self, pairs):
1065 def between(self, pairs):
1053 r = []
1066 r = []
1054
1067
1055 for top, bottom in pairs:
1068 for top, bottom in pairs:
1056 n, l, i = top, [], 0
1069 n, l, i = top, [], 0
1057 f = 1
1070 f = 1
1058
1071
1059 while n != bottom:
1072 while n != bottom:
1060 p = self.changelog.parents(n)[0]
1073 p = self.changelog.parents(n)[0]
1061 if i == f:
1074 if i == f:
1062 l.append(n)
1075 l.append(n)
1063 f = f * 2
1076 f = f * 2
1064 n = p
1077 n = p
1065 i += 1
1078 i += 1
1066
1079
1067 r.append(l)
1080 r.append(l)
1068
1081
1069 return r
1082 return r
1070
1083
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # local changelog's node map; used below purely for "do we already
        # know this node" membership tests
        m = self.changelog.nodemap
        search = []       # incomplete branches scheduled for binary search
        fetch = {}        # earliest unknown nodes found so far (the result)
        seen = {}         # branch heads already examined
        seenbranch = {}   # full branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split remote heads into those we already have (common) and those
        # we still have to investigate
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already asked the remote about, to avoid
        # requesting the same branch twice
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                # n is a (head, root, p1, p2) tuple from remote.branches
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # the branch root's parents are both known
                            # locally, so the root itself is the boundary
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue any unknown, not-yet-requested parents for the
                    # next round of remote.branches calls
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the follow-up branch requests, 10 nodes at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # remote.between returns nodes sampled at distances 1, 2, 4, ...
            # along the branch (see the between method above)
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap between p and i is at most one node: p is the
                        # earliest unknown on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short(f[:4]) prints the hex of only the first
                # 4 bytes of the node -- possibly short(f) was intended
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # the only common ancestor is the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1211
1224
1212 def findoutgoing(self, remote, base=None, heads=None, force=False):
1225 def findoutgoing(self, remote, base=None, heads=None, force=False):
1213 """Return list of nodes that are roots of subsets not in remote
1226 """Return list of nodes that are roots of subsets not in remote
1214
1227
1215 If base dict is specified, assume that these nodes and their parents
1228 If base dict is specified, assume that these nodes and their parents
1216 exist on the remote side.
1229 exist on the remote side.
1217 If a list of heads is specified, return only nodes which are heads
1230 If a list of heads is specified, return only nodes which are heads
1218 or ancestors of these heads, and return a second element which
1231 or ancestors of these heads, and return a second element which
1219 contains all remote heads which get new children.
1232 contains all remote heads which get new children.
1220 """
1233 """
1221 if base == None:
1234 if base == None:
1222 base = {}
1235 base = {}
1223 self.findincoming(remote, base, heads, force=force)
1236 self.findincoming(remote, base, heads, force=force)
1224
1237
1225 self.ui.debug(_("common changesets up to ")
1238 self.ui.debug(_("common changesets up to ")
1226 + " ".join(map(short, base.keys())) + "\n")
1239 + " ".join(map(short, base.keys())) + "\n")
1227
1240
1228 remain = dict.fromkeys(self.changelog.nodemap)
1241 remain = dict.fromkeys(self.changelog.nodemap)
1229
1242
1230 # prune everything remote has from the tree
1243 # prune everything remote has from the tree
1231 del remain[nullid]
1244 del remain[nullid]
1232 remove = base.keys()
1245 remove = base.keys()
1233 while remove:
1246 while remove:
1234 n = remove.pop(0)
1247 n = remove.pop(0)
1235 if n in remain:
1248 if n in remain:
1236 del remain[n]
1249 del remain[n]
1237 for p in self.changelog.parents(n):
1250 for p in self.changelog.parents(n):
1238 remove.append(p)
1251 remove.append(p)
1239
1252
1240 # find every node whose parents have been pruned
1253 # find every node whose parents have been pruned
1241 subset = []
1254 subset = []
1242 # find every remote head that will get new children
1255 # find every remote head that will get new children
1243 updated_heads = {}
1256 updated_heads = {}
1244 for n in remain:
1257 for n in remain:
1245 p1, p2 = self.changelog.parents(n)
1258 p1, p2 = self.changelog.parents(n)
1246 if p1 not in remain and p2 not in remain:
1259 if p1 not in remain and p2 not in remain:
1247 subset.append(n)
1260 subset.append(n)
1248 if heads:
1261 if heads:
1249 if p1 in heads:
1262 if p1 in heads:
1250 updated_heads[p1] = True
1263 updated_heads[p1] = True
1251 if p2 in heads:
1264 if p2 in heads:
1252 updated_heads[p2] = True
1265 updated_heads[p2] = True
1253
1266
1254 # this is the set of all roots we have to push
1267 # this is the set of all roots we have to push
1255 if heads:
1268 if heads:
1256 return subset, updated_heads.keys()
1269 return subset, updated_heads.keys()
1257 else:
1270 else:
1258 return subset
1271 return subset
1259
1272
1260 def pull(self, remote, heads=None, force=False, lock=None):
1273 def pull(self, remote, heads=None, force=False, lock=None):
1261 mylock = False
1274 mylock = False
1262 if not lock:
1275 if not lock:
1263 lock = self.lock()
1276 lock = self.lock()
1264 mylock = True
1277 mylock = True
1265
1278
1266 try:
1279 try:
1267 fetch = self.findincoming(remote, force=force)
1280 fetch = self.findincoming(remote, force=force)
1268 if fetch == [nullid]:
1281 if fetch == [nullid]:
1269 self.ui.status(_("requesting all changes\n"))
1282 self.ui.status(_("requesting all changes\n"))
1270
1283
1271 if not fetch:
1284 if not fetch:
1272 self.ui.status(_("no changes found\n"))
1285 self.ui.status(_("no changes found\n"))
1273 return 0
1286 return 0
1274
1287
1275 if heads is None:
1288 if heads is None:
1276 cg = remote.changegroup(fetch, 'pull')
1289 cg = remote.changegroup(fetch, 'pull')
1277 else:
1290 else:
1278 if 'changegroupsubset' not in remote.capabilities:
1291 if 'changegroupsubset' not in remote.capabilities:
1279 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1292 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1280 cg = remote.changegroupsubset(fetch, heads, 'pull')
1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1281 return self.addchangegroup(cg, 'pull', remote.url())
1294 return self.addchangegroup(cg, 'pull', remote.url())
1282 finally:
1295 finally:
1283 if mylock:
1296 if mylock:
1284 lock.release()
1297 lock.release()
1285
1298
1286 def push(self, remote, force=False, revs=None):
1299 def push(self, remote, force=False, revs=None):
1287 # there are two ways to push to remote repo:
1300 # there are two ways to push to remote repo:
1288 #
1301 #
1289 # addchangegroup assumes local user can lock remote
1302 # addchangegroup assumes local user can lock remote
1290 # repo (local filesystem, old ssh servers).
1303 # repo (local filesystem, old ssh servers).
1291 #
1304 #
1292 # unbundle assumes local user cannot lock remote repo (new ssh
1305 # unbundle assumes local user cannot lock remote repo (new ssh
1293 # servers, http servers).
1306 # servers, http servers).
1294
1307
1295 if remote.capable('unbundle'):
1308 if remote.capable('unbundle'):
1296 return self.push_unbundle(remote, force, revs)
1309 return self.push_unbundle(remote, force, revs)
1297 return self.push_addchangegroup(remote, force, revs)
1310 return self.push_addchangegroup(remote, force, revs)
1298
1311
    def prepush(self, remote, force, revs):
        """Compute the changegroup needed to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, errcode)
        when there is nothing to push or when the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills base with the common nodes; a non-empty result
        # means remote has changes we lack (used for the warning below)
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: any number of heads is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # NOTE(review): presumably changelog.heads(r, heads)
                        # returns the subset of heads descending from r --
                        # confirm against changelog's API
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r stays
                            # a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # push everything missing on the remote side
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1354
1367
1355 def push_addchangegroup(self, remote, force, revs):
1368 def push_addchangegroup(self, remote, force, revs):
1356 lock = remote.lock()
1369 lock = remote.lock()
1357
1370
1358 ret = self.prepush(remote, force, revs)
1371 ret = self.prepush(remote, force, revs)
1359 if ret[0] is not None:
1372 if ret[0] is not None:
1360 cg, remote_heads = ret
1373 cg, remote_heads = ret
1361 return remote.addchangegroup(cg, 'push', self.url())
1374 return remote.addchangegroup(cg, 'push', self.url())
1362 return ret[1]
1375 return ret[1]
1363
1376
1364 def push_unbundle(self, remote, force, revs):
1377 def push_unbundle(self, remote, force, revs):
1365 # local repo finds heads on server, finds out what revs it
1378 # local repo finds heads on server, finds out what revs it
1366 # must push. once revs transferred, if server finds it has
1379 # must push. once revs transferred, if server finds it has
1367 # different heads (someone else won commit/push race), server
1380 # different heads (someone else won commit/push race), server
1368 # aborts.
1381 # aborts.
1369
1382
1370 ret = self.prepush(remote, force, revs)
1383 ret = self.prepush(remote, force, revs)
1371 if ret[0] is not None:
1384 if ret[0] is not None:
1372 cg, remote_heads = ret
1385 cg, remote_heads = ret
1373 if force: remote_heads = ['force']
1386 if force: remote_heads = ['force']
1374 return remote.unbundle(cg, remote_heads, 'push')
1387 return remote.unbundle(cg, remote_heads, 'push')
1375 return ret[1]
1388 return ret[1]
1376
1389
1377 def changegroupinfo(self, nodes):
1390 def changegroupinfo(self, nodes):
1378 self.ui.note(_("%d changesets found\n") % len(nodes))
1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1379 if self.ui.debugflag:
1392 if self.ui.debugflag:
1380 self.ui.debug(_("List of changesets:\n"))
1393 self.ui.debug(_("List of changesets:\n"))
1381 for node in nodes:
1394 for node in nodes:
1382 self.ui.debug("%s\n" % hex(node))
1395 self.ui.debug("%s\n" % hex(node))
1383
1396
1384 def changegroupsubset(self, bases, heads, source):
1397 def changegroupsubset(self, bases, heads, source):
1385 """This function generates a changegroup consisting of all the nodes
1398 """This function generates a changegroup consisting of all the nodes
1386 that are descendents of any of the bases, and ancestors of any of
1399 that are descendents of any of the bases, and ancestors of any of
1387 the heads.
1400 the heads.
1388
1401
1389 It is fairly complex as determining which filenodes and which
1402 It is fairly complex as determining which filenodes and which
1390 manifest nodes need to be included for the changeset to be complete
1403 manifest nodes need to be included for the changeset to be complete
1391 is non-trivial.
1404 is non-trivial.
1392
1405
1393 Another wrinkle is doing the reverse, figuring out which changeset in
1406 Another wrinkle is doing the reverse, figuring out which changeset in
1394 the changegroup a particular filenode or manifestnode belongs to."""
1407 the changegroup a particular filenode or manifestnode belongs to."""
1395
1408
1396 self.hook('preoutgoing', throw=True, source=source)
1409 self.hook('preoutgoing', throw=True, source=source)
1397
1410
1398 # Set up some initial variables
1411 # Set up some initial variables
1399 # Make it easy to refer to self.changelog
1412 # Make it easy to refer to self.changelog
1400 cl = self.changelog
1413 cl = self.changelog
1401 # msng is short for missing - compute the list of changesets in this
1414 # msng is short for missing - compute the list of changesets in this
1402 # changegroup.
1415 # changegroup.
1403 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1416 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1404 self.changegroupinfo(msng_cl_lst)
1417 self.changegroupinfo(msng_cl_lst)
1405 # Some bases may turn out to be superfluous, and some heads may be
1418 # Some bases may turn out to be superfluous, and some heads may be
1406 # too. nodesbetween will return the minimal set of bases and heads
1419 # too. nodesbetween will return the minimal set of bases and heads
1407 # necessary to re-create the changegroup.
1420 # necessary to re-create the changegroup.
1408
1421
1409 # Known heads are the list of heads that it is assumed the recipient
1422 # Known heads are the list of heads that it is assumed the recipient
1410 # of this changegroup will know about.
1423 # of this changegroup will know about.
1411 knownheads = {}
1424 knownheads = {}
1412 # We assume that all parents of bases are known heads.
1425 # We assume that all parents of bases are known heads.
1413 for n in bases:
1426 for n in bases:
1414 for p in cl.parents(n):
1427 for p in cl.parents(n):
1415 if p != nullid:
1428 if p != nullid:
1416 knownheads[p] = 1
1429 knownheads[p] = 1
1417 knownheads = knownheads.keys()
1430 knownheads = knownheads.keys()
1418 if knownheads:
1431 if knownheads:
1419 # Now that we know what heads are known, we can compute which
1432 # Now that we know what heads are known, we can compute which
1420 # changesets are known. The recipient must know about all
1433 # changesets are known. The recipient must know about all
1421 # changesets required to reach the known heads from the null
1434 # changesets required to reach the known heads from the null
1422 # changeset.
1435 # changeset.
1423 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1436 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1424 junk = None
1437 junk = None
1425 # Transform the list into an ersatz set.
1438 # Transform the list into an ersatz set.
1426 has_cl_set = dict.fromkeys(has_cl_set)
1439 has_cl_set = dict.fromkeys(has_cl_set)
1427 else:
1440 else:
1428 # If there were no known heads, the recipient cannot be assumed to
1441 # If there were no known heads, the recipient cannot be assumed to
1429 # know about any changesets.
1442 # know about any changesets.
1430 has_cl_set = {}
1443 has_cl_set = {}
1431
1444
1432 # Make it easy to refer to self.manifest
1445 # Make it easy to refer to self.manifest
1433 mnfst = self.manifest
1446 mnfst = self.manifest
1434 # We don't know which manifests are missing yet
1447 # We don't know which manifests are missing yet
1435 msng_mnfst_set = {}
1448 msng_mnfst_set = {}
1436 # Nor do we know which filenodes are missing.
1449 # Nor do we know which filenodes are missing.
1437 msng_filenode_set = {}
1450 msng_filenode_set = {}
1438
1451
1439 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1452 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1440 junk = None
1453 junk = None
1441
1454
1442 # A changeset always belongs to itself, so the changenode lookup
1455 # A changeset always belongs to itself, so the changenode lookup
1443 # function for a changenode is identity.
1456 # function for a changenode is identity.
1444 def identity(x):
1457 def identity(x):
1445 return x
1458 return x
1446
1459
1447 # A function generating function. Sets up an environment for the
1460 # A function generating function. Sets up an environment for the
1448 # inner function.
1461 # inner function.
1449 def cmp_by_rev_func(revlog):
1462 def cmp_by_rev_func(revlog):
1450 # Compare two nodes by their revision number in the environment's
1463 # Compare two nodes by their revision number in the environment's
1451 # revision history. Since the revision number both represents the
1464 # revision history. Since the revision number both represents the
1452 # most efficient order to read the nodes in, and represents a
1465 # most efficient order to read the nodes in, and represents a
1453 # topological sorting of the nodes, this function is often useful.
1466 # topological sorting of the nodes, this function is often useful.
1454 def cmp_by_rev(a, b):
1467 def cmp_by_rev(a, b):
1455 return cmp(revlog.rev(a), revlog.rev(b))
1468 return cmp(revlog.rev(a), revlog.rev(b))
1456 return cmp_by_rev
1469 return cmp_by_rev
1457
1470
1458 # If we determine that a particular file or manifest node must be a
1471 # If we determine that a particular file or manifest node must be a
1459 # node that the recipient of the changegroup will already have, we can
1472 # node that the recipient of the changegroup will already have, we can
1460 # also assume the recipient will have all the parents. This function
1473 # also assume the recipient will have all the parents. This function
1461 # prunes them from the set of missing nodes.
1474 # prunes them from the set of missing nodes.
1462 def prune_parents(revlog, hasset, msngset):
1475 def prune_parents(revlog, hasset, msngset):
1463 haslst = hasset.keys()
1476 haslst = hasset.keys()
1464 haslst.sort(cmp_by_rev_func(revlog))
1477 haslst.sort(cmp_by_rev_func(revlog))
1465 for node in haslst:
1478 for node in haslst:
1466 parentlst = [p for p in revlog.parents(node) if p != nullid]
1479 parentlst = [p for p in revlog.parents(node) if p != nullid]
1467 while parentlst:
1480 while parentlst:
1468 n = parentlst.pop()
1481 n = parentlst.pop()
1469 if n not in hasset:
1482 if n not in hasset:
1470 hasset[n] = 1
1483 hasset[n] = 1
1471 p = [p for p in revlog.parents(n) if p != nullid]
1484 p = [p for p in revlog.parents(n) if p != nullid]
1472 parentlst.extend(p)
1485 parentlst.extend(p)
1473 for n in hasset:
1486 for n in hasset:
1474 msngset.pop(n, None)
1487 msngset.pop(n, None)
1475
1488
1476 # This is a function generating function used to set up an environment
1489 # This is a function generating function used to set up an environment
1477 # for the inner function to execute in.
1490 # for the inner function to execute in.
1478 def manifest_and_file_collector(changedfileset):
1491 def manifest_and_file_collector(changedfileset):
1479 # This is an information gathering function that gathers
1492 # This is an information gathering function that gathers
1480 # information from each changeset node that goes out as part of
1493 # information from each changeset node that goes out as part of
1481 # the changegroup. The information gathered is a list of which
1494 # the changegroup. The information gathered is a list of which
1482 # manifest nodes are potentially required (the recipient may
1495 # manifest nodes are potentially required (the recipient may
1483 # already have them) and total list of all files which were
1496 # already have them) and total list of all files which were
1484 # changed in any changeset in the changegroup.
1497 # changed in any changeset in the changegroup.
1485 #
1498 #
1486 # We also remember the first changenode we saw any manifest
1499 # We also remember the first changenode we saw any manifest
1487 # referenced by so we can later determine which changenode 'owns'
1500 # referenced by so we can later determine which changenode 'owns'
1488 # the manifest.
1501 # the manifest.
1489 def collect_manifests_and_files(clnode):
1502 def collect_manifests_and_files(clnode):
1490 c = cl.read(clnode)
1503 c = cl.read(clnode)
1491 for f in c[3]:
1504 for f in c[3]:
1492 # This is to make sure we only have one instance of each
1505 # This is to make sure we only have one instance of each
1493 # filename string for each filename.
1506 # filename string for each filename.
1494 changedfileset.setdefault(f, f)
1507 changedfileset.setdefault(f, f)
1495 msng_mnfst_set.setdefault(c[0], clnode)
1508 msng_mnfst_set.setdefault(c[0], clnode)
1496 return collect_manifests_and_files
1509 return collect_manifests_and_files
1497
1510
1498 # Figure out which manifest nodes (of the ones we think might be part
1511 # Figure out which manifest nodes (of the ones we think might be part
1499 # of the changegroup) the recipient must know about and remove them
1512 # of the changegroup) the recipient must know about and remove them
1500 # from the changegroup.
1513 # from the changegroup.
1501 def prune_manifests():
1514 def prune_manifests():
1502 has_mnfst_set = {}
1515 has_mnfst_set = {}
1503 for n in msng_mnfst_set:
1516 for n in msng_mnfst_set:
1504 # If a 'missing' manifest thinks it belongs to a changenode
1517 # If a 'missing' manifest thinks it belongs to a changenode
1505 # the recipient is assumed to have, obviously the recipient
1518 # the recipient is assumed to have, obviously the recipient
1506 # must have that manifest.
1519 # must have that manifest.
1507 linknode = cl.node(mnfst.linkrev(n))
1520 linknode = cl.node(mnfst.linkrev(n))
1508 if linknode in has_cl_set:
1521 if linknode in has_cl_set:
1509 has_mnfst_set[n] = 1
1522 has_mnfst_set[n] = 1
1510 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1523 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1511
1524
1512 # Use the information collected in collect_manifests_and_files to say
1525 # Use the information collected in collect_manifests_and_files to say
1513 # which changenode any manifestnode belongs to.
1526 # which changenode any manifestnode belongs to.
1514 def lookup_manifest_link(mnfstnode):
1527 def lookup_manifest_link(mnfstnode):
1515 return msng_mnfst_set[mnfstnode]
1528 return msng_mnfst_set[mnfstnode]
1516
1529
1517 # A function generating function that sets up the initial environment
1530 # A function generating function that sets up the initial environment
1518 # the inner function.
1531 # the inner function.
1519 def filenode_collector(changedfiles):
1532 def filenode_collector(changedfiles):
1520 next_rev = [0]
1533 next_rev = [0]
1521 # This gathers information from each manifestnode included in the
1534 # This gathers information from each manifestnode included in the
1522 # changegroup about which filenodes the manifest node references
1535 # changegroup about which filenodes the manifest node references
1523 # so we can include those in the changegroup too.
1536 # so we can include those in the changegroup too.
1524 #
1537 #
1525 # It also remembers which changenode each filenode belongs to. It
1538 # It also remembers which changenode each filenode belongs to. It
1526 # does this by assuming the a filenode belongs to the changenode
1539 # does this by assuming the a filenode belongs to the changenode
1527 # the first manifest that references it belongs to.
1540 # the first manifest that references it belongs to.
1528 def collect_msng_filenodes(mnfstnode):
1541 def collect_msng_filenodes(mnfstnode):
1529 r = mnfst.rev(mnfstnode)
1542 r = mnfst.rev(mnfstnode)
1530 if r == next_rev[0]:
1543 if r == next_rev[0]:
1531 # If the last rev we looked at was the one just previous,
1544 # If the last rev we looked at was the one just previous,
1532 # we only need to see a diff.
1545 # we only need to see a diff.
1533 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1534 # For each line in the delta
1547 # For each line in the delta
1535 for dline in delta.splitlines():
1548 for dline in delta.splitlines():
1536 # get the filename and filenode for that line
1549 # get the filename and filenode for that line
1537 f, fnode = dline.split('\0')
1550 f, fnode = dline.split('\0')
1538 fnode = bin(fnode[:40])
1551 fnode = bin(fnode[:40])
1539 f = changedfiles.get(f, None)
1552 f = changedfiles.get(f, None)
1540 # And if the file is in the list of files we care
1553 # And if the file is in the list of files we care
1541 # about.
1554 # about.
1542 if f is not None:
1555 if f is not None:
1543 # Get the changenode this manifest belongs to
1556 # Get the changenode this manifest belongs to
1544 clnode = msng_mnfst_set[mnfstnode]
1557 clnode = msng_mnfst_set[mnfstnode]
1545 # Create the set of filenodes for the file if
1558 # Create the set of filenodes for the file if
1546 # there isn't one already.
1559 # there isn't one already.
1547 ndset = msng_filenode_set.setdefault(f, {})
1560 ndset = msng_filenode_set.setdefault(f, {})
1548 # And set the filenode's changelog node to the
1561 # And set the filenode's changelog node to the
1549 # manifest's if it hasn't been set already.
1562 # manifest's if it hasn't been set already.
1550 ndset.setdefault(fnode, clnode)
1563 ndset.setdefault(fnode, clnode)
1551 else:
1564 else:
1552 # Otherwise we need a full manifest.
1565 # Otherwise we need a full manifest.
1553 m = mnfst.read(mnfstnode)
1566 m = mnfst.read(mnfstnode)
1554 # For every file in we care about.
1567 # For every file in we care about.
1555 for f in changedfiles:
1568 for f in changedfiles:
1556 fnode = m.get(f, None)
1569 fnode = m.get(f, None)
1557 # If it's in the manifest
1570 # If it's in the manifest
1558 if fnode is not None:
1571 if fnode is not None:
1559 # See comments above.
1572 # See comments above.
1560 clnode = msng_mnfst_set[mnfstnode]
1573 clnode = msng_mnfst_set[mnfstnode]
1561 ndset = msng_filenode_set.setdefault(f, {})
1574 ndset = msng_filenode_set.setdefault(f, {})
1562 ndset.setdefault(fnode, clnode)
1575 ndset.setdefault(fnode, clnode)
1563 # Remember the revision we hope to see next.
1576 # Remember the revision we hope to see next.
1564 next_rev[0] = r + 1
1577 next_rev[0] = r + 1
1565 return collect_msng_filenodes
1578 return collect_msng_filenodes
1566
1579
1567 # We have a list of filenodes we think we need for a file, lets remove
1580 # We have a list of filenodes we think we need for a file, lets remove
1568 # all those we now the recipient must have.
1581 # all those we now the recipient must have.
1569 def prune_filenodes(f, filerevlog):
1582 def prune_filenodes(f, filerevlog):
1570 msngset = msng_filenode_set[f]
1583 msngset = msng_filenode_set[f]
1571 hasset = {}
1584 hasset = {}
1572 # If a 'missing' filenode thinks it belongs to a changenode we
1585 # If a 'missing' filenode thinks it belongs to a changenode we
1573 # assume the recipient must have, then the recipient must have
1586 # assume the recipient must have, then the recipient must have
1574 # that filenode.
1587 # that filenode.
1575 for n in msngset:
1588 for n in msngset:
1576 clnode = cl.node(filerevlog.linkrev(n))
1589 clnode = cl.node(filerevlog.linkrev(n))
1577 if clnode in has_cl_set:
1590 if clnode in has_cl_set:
1578 hasset[n] = 1
1591 hasset[n] = 1
1579 prune_parents(filerevlog, hasset, msngset)
1592 prune_parents(filerevlog, hasset, msngset)
1580
1593
1581 # A function generator function that sets up the a context for the
1594 # A function generator function that sets up the a context for the
1582 # inner function.
1595 # inner function.
1583 def lookup_filenode_link_func(fname):
1596 def lookup_filenode_link_func(fname):
1584 msngset = msng_filenode_set[fname]
1597 msngset = msng_filenode_set[fname]
1585 # Lookup the changenode the filenode belongs to.
1598 # Lookup the changenode the filenode belongs to.
1586 def lookup_filenode_link(fnode):
1599 def lookup_filenode_link(fnode):
1587 return msngset[fnode]
1600 return msngset[fnode]
1588 return lookup_filenode_link
1601 return lookup_filenode_link
1589
1602
1590 # Now that we have all theses utility functions to help out and
1603 # Now that we have all theses utility functions to help out and
1591 # logically divide up the task, generate the group.
1604 # logically divide up the task, generate the group.
1592 def gengroup():
1605 def gengroup():
1593 # The set of changed files starts empty.
1606 # The set of changed files starts empty.
1594 changedfiles = {}
1607 changedfiles = {}
1595 # Create a changenode group generator that will call our functions
1608 # Create a changenode group generator that will call our functions
1596 # back to lookup the owning changenode and collect information.
1609 # back to lookup the owning changenode and collect information.
1597 group = cl.group(msng_cl_lst, identity,
1610 group = cl.group(msng_cl_lst, identity,
1598 manifest_and_file_collector(changedfiles))
1611 manifest_and_file_collector(changedfiles))
1599 for chnk in group:
1612 for chnk in group:
1600 yield chnk
1613 yield chnk
1601
1614
1602 # The list of manifests has been collected by the generator
1615 # The list of manifests has been collected by the generator
1603 # calling our functions back.
1616 # calling our functions back.
1604 prune_manifests()
1617 prune_manifests()
1605 msng_mnfst_lst = msng_mnfst_set.keys()
1618 msng_mnfst_lst = msng_mnfst_set.keys()
1606 # Sort the manifestnodes by revision number.
1619 # Sort the manifestnodes by revision number.
1607 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1620 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1608 # Create a generator for the manifestnodes that calls our lookup
1621 # Create a generator for the manifestnodes that calls our lookup
1609 # and data collection functions back.
1622 # and data collection functions back.
1610 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1623 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1611 filenode_collector(changedfiles))
1624 filenode_collector(changedfiles))
1612 for chnk in group:
1625 for chnk in group:
1613 yield chnk
1626 yield chnk
1614
1627
1615 # These are no longer needed, dereference and toss the memory for
1628 # These are no longer needed, dereference and toss the memory for
1616 # them.
1629 # them.
1617 msng_mnfst_lst = None
1630 msng_mnfst_lst = None
1618 msng_mnfst_set.clear()
1631 msng_mnfst_set.clear()
1619
1632
1620 changedfiles = changedfiles.keys()
1633 changedfiles = changedfiles.keys()
1621 changedfiles.sort()
1634 changedfiles.sort()
1622 # Go through all our files in order sorted by name.
1635 # Go through all our files in order sorted by name.
1623 for fname in changedfiles:
1636 for fname in changedfiles:
1624 filerevlog = self.file(fname)
1637 filerevlog = self.file(fname)
1625 # Toss out the filenodes that the recipient isn't really
1638 # Toss out the filenodes that the recipient isn't really
1626 # missing.
1639 # missing.
1627 if msng_filenode_set.has_key(fname):
1640 if msng_filenode_set.has_key(fname):
1628 prune_filenodes(fname, filerevlog)
1641 prune_filenodes(fname, filerevlog)
1629 msng_filenode_lst = msng_filenode_set[fname].keys()
1642 msng_filenode_lst = msng_filenode_set[fname].keys()
1630 else:
1643 else:
1631 msng_filenode_lst = []
1644 msng_filenode_lst = []
1632 # If any filenodes are left, generate the group for them,
1645 # If any filenodes are left, generate the group for them,
1633 # otherwise don't bother.
1646 # otherwise don't bother.
1634 if len(msng_filenode_lst) > 0:
1647 if len(msng_filenode_lst) > 0:
1635 yield changegroup.genchunk(fname)
1648 yield changegroup.genchunk(fname)
1636 # Sort the filenodes by their revision #
1649 # Sort the filenodes by their revision #
1637 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1650 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1638 # Create a group generator and only pass in a changenode
1651 # Create a group generator and only pass in a changenode
1639 # lookup function as we need to collect no information
1652 # lookup function as we need to collect no information
1640 # from filenodes.
1653 # from filenodes.
1641 group = filerevlog.group(msng_filenode_lst,
1654 group = filerevlog.group(msng_filenode_lst,
1642 lookup_filenode_link_func(fname))
1655 lookup_filenode_link_func(fname))
1643 for chnk in group:
1656 for chnk in group:
1644 yield chnk
1657 yield chnk
1645 if msng_filenode_set.has_key(fname):
1658 if msng_filenode_set.has_key(fname):
1646 # Don't need this anymore, toss it to free memory.
1659 # Don't need this anymore, toss it to free memory.
1647 del msng_filenode_set[fname]
1660 del msng_filenode_set[fname]
1648 # Signal that no more groups are left.
1661 # Signal that no more groups are left.
1649 yield changegroup.closechunk()
1662 yield changegroup.closechunk()
1650
1663
1651 if msng_cl_lst:
1664 if msng_cl_lst:
1652 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1665 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1653
1666
1654 return util.chunkbuffer(gengroup())
1667 return util.chunkbuffer(gengroup())
1655
1668
1656 def changegroup(self, basenodes, source):
1669 def changegroup(self, basenodes, source):
1657 """Generate a changegroup of all nodes that we have that a recipient
1670 """Generate a changegroup of all nodes that we have that a recipient
1658 doesn't.
1671 doesn't.
1659
1672
1660 This is much easier than the previous function as we can assume that
1673 This is much easier than the previous function as we can assume that
1661 the recipient has any changenode we aren't sending them."""
1674 the recipient has any changenode we aren't sending them."""
1662
1675
1663 self.hook('preoutgoing', throw=True, source=source)
1676 self.hook('preoutgoing', throw=True, source=source)
1664
1677
1665 cl = self.changelog
1678 cl = self.changelog
1666 nodes = cl.nodesbetween(basenodes, None)[0]
1679 nodes = cl.nodesbetween(basenodes, None)[0]
1667 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1680 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1668 self.changegroupinfo(nodes)
1681 self.changegroupinfo(nodes)
1669
1682
1670 def identity(x):
1683 def identity(x):
1671 return x
1684 return x
1672
1685
1673 def gennodelst(revlog):
1686 def gennodelst(revlog):
1674 for r in xrange(0, revlog.count()):
1687 for r in xrange(0, revlog.count()):
1675 n = revlog.node(r)
1688 n = revlog.node(r)
1676 if revlog.linkrev(n) in revset:
1689 if revlog.linkrev(n) in revset:
1677 yield n
1690 yield n
1678
1691
1679 def changed_file_collector(changedfileset):
1692 def changed_file_collector(changedfileset):
1680 def collect_changed_files(clnode):
1693 def collect_changed_files(clnode):
1681 c = cl.read(clnode)
1694 c = cl.read(clnode)
1682 for fname in c[3]:
1695 for fname in c[3]:
1683 changedfileset[fname] = 1
1696 changedfileset[fname] = 1
1684 return collect_changed_files
1697 return collect_changed_files
1685
1698
1686 def lookuprevlink_func(revlog):
1699 def lookuprevlink_func(revlog):
1687 def lookuprevlink(n):
1700 def lookuprevlink(n):
1688 return cl.node(revlog.linkrev(n))
1701 return cl.node(revlog.linkrev(n))
1689 return lookuprevlink
1702 return lookuprevlink
1690
1703
1691 def gengroup():
1704 def gengroup():
1692 # construct a list of all changed files
1705 # construct a list of all changed files
1693 changedfiles = {}
1706 changedfiles = {}
1694
1707
1695 for chnk in cl.group(nodes, identity,
1708 for chnk in cl.group(nodes, identity,
1696 changed_file_collector(changedfiles)):
1709 changed_file_collector(changedfiles)):
1697 yield chnk
1710 yield chnk
1698 changedfiles = changedfiles.keys()
1711 changedfiles = changedfiles.keys()
1699 changedfiles.sort()
1712 changedfiles.sort()
1700
1713
1701 mnfst = self.manifest
1714 mnfst = self.manifest
1702 nodeiter = gennodelst(mnfst)
1715 nodeiter = gennodelst(mnfst)
1703 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1716 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1704 yield chnk
1717 yield chnk
1705
1718
1706 for fname in changedfiles:
1719 for fname in changedfiles:
1707 filerevlog = self.file(fname)
1720 filerevlog = self.file(fname)
1708 nodeiter = gennodelst(filerevlog)
1721 nodeiter = gennodelst(filerevlog)
1709 nodeiter = list(nodeiter)
1722 nodeiter = list(nodeiter)
1710 if nodeiter:
1723 if nodeiter:
1711 yield changegroup.genchunk(fname)
1724 yield changegroup.genchunk(fname)
1712 lookup = lookuprevlink_func(filerevlog)
1725 lookup = lookuprevlink_func(filerevlog)
1713 for chnk in filerevlog.group(nodeiter, lookup):
1726 for chnk in filerevlog.group(nodeiter, lookup):
1714 yield chnk
1727 yield chnk
1715
1728
1716 yield changegroup.closechunk()
1729 yield changegroup.closechunk()
1717
1730
1718 if nodes:
1731 if nodes:
1719 self.hook('outgoing', node=hex(nodes[0]), source=source)
1732 self.hook('outgoing', node=hex(nodes[0]), source=source)
1720
1733
1721 return util.chunkbuffer(gengroup())
1734 return util.chunkbuffer(gengroup())
1722
1735
1723 def addchangegroup(self, source, srctype, url):
1736 def addchangegroup(self, source, srctype, url):
1724 """add changegroup to repo.
1737 """add changegroup to repo.
1725
1738
1726 return values:
1739 return values:
1727 - nothing changed or no source: 0
1740 - nothing changed or no source: 0
1728 - more heads than before: 1+added heads (2..n)
1741 - more heads than before: 1+added heads (2..n)
1729 - less heads than before: -1-removed heads (-2..-n)
1742 - less heads than before: -1-removed heads (-2..-n)
1730 - number of heads stays the same: 1
1743 - number of heads stays the same: 1
1731 """
1744 """
1732 def csmap(x):
1745 def csmap(x):
1733 self.ui.debug(_("add changeset %s\n") % short(x))
1746 self.ui.debug(_("add changeset %s\n") % short(x))
1734 return cl.count()
1747 return cl.count()
1735
1748
1736 def revmap(x):
1749 def revmap(x):
1737 return cl.rev(x)
1750 return cl.rev(x)
1738
1751
1739 if not source:
1752 if not source:
1740 return 0
1753 return 0
1741
1754
1742 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1743
1756
1744 changesets = files = revisions = 0
1757 changesets = files = revisions = 0
1745
1758
1746 tr = self.transaction()
1759 tr = self.transaction()
1747
1760
1748 # write changelog data to temp files so concurrent readers will not see
1761 # write changelog data to temp files so concurrent readers will not see
1749 # inconsistent view
1762 # inconsistent view
1750 cl = None
1763 cl = None
1751 try:
1764 try:
1752 cl = appendfile.appendchangelog(self.sopener,
1765 cl = appendfile.appendchangelog(self.sopener,
1753 self.changelog.version)
1766 self.changelog.version)
1754
1767
1755 oldheads = len(cl.heads())
1768 oldheads = len(cl.heads())
1756
1769
1757 # pull off the changeset group
1770 # pull off the changeset group
1758 self.ui.status(_("adding changesets\n"))
1771 self.ui.status(_("adding changesets\n"))
1759 cor = cl.count() - 1
1772 cor = cl.count() - 1
1760 chunkiter = changegroup.chunkiter(source)
1773 chunkiter = changegroup.chunkiter(source)
1761 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1762 raise util.Abort(_("received changelog group is empty"))
1775 raise util.Abort(_("received changelog group is empty"))
1763 cnr = cl.count() - 1
1776 cnr = cl.count() - 1
1764 changesets = cnr - cor
1777 changesets = cnr - cor
1765
1778
1766 # pull off the manifest group
1779 # pull off the manifest group
1767 self.ui.status(_("adding manifests\n"))
1780 self.ui.status(_("adding manifests\n"))
1768 chunkiter = changegroup.chunkiter(source)
1781 chunkiter = changegroup.chunkiter(source)
1769 # no need to check for empty manifest group here:
1782 # no need to check for empty manifest group here:
1770 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1771 # no new manifest will be created and the manifest group will
1784 # no new manifest will be created and the manifest group will
1772 # be empty during the pull
1785 # be empty during the pull
1773 self.manifest.addgroup(chunkiter, revmap, tr)
1786 self.manifest.addgroup(chunkiter, revmap, tr)
1774
1787
1775 # process the files
1788 # process the files
1776 self.ui.status(_("adding file changes\n"))
1789 self.ui.status(_("adding file changes\n"))
1777 while 1:
1790 while 1:
1778 f = changegroup.getchunk(source)
1791 f = changegroup.getchunk(source)
1779 if not f:
1792 if not f:
1780 break
1793 break
1781 self.ui.debug(_("adding %s revisions\n") % f)
1794 self.ui.debug(_("adding %s revisions\n") % f)
1782 fl = self.file(f)
1795 fl = self.file(f)
1783 o = fl.count()
1796 o = fl.count()
1784 chunkiter = changegroup.chunkiter(source)
1797 chunkiter = changegroup.chunkiter(source)
1785 if fl.addgroup(chunkiter, revmap, tr) is None:
1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1786 raise util.Abort(_("received file revlog group is empty"))
1799 raise util.Abort(_("received file revlog group is empty"))
1787 revisions += fl.count() - o
1800 revisions += fl.count() - o
1788 files += 1
1801 files += 1
1789
1802
1790 cl.writedata()
1803 cl.writedata()
1791 finally:
1804 finally:
1792 if cl:
1805 if cl:
1793 cl.cleanup()
1806 cl.cleanup()
1794
1807
1795 # make changelog see real files again
1808 # make changelog see real files again
1796 self.changelog = changelog.changelog(self.sopener,
1809 self.changelog = changelog.changelog(self.sopener,
1797 self.changelog.version)
1810 self.changelog.version)
1798 self.changelog.checkinlinesize(tr)
1811 self.changelog.checkinlinesize(tr)
1799
1812
1800 newheads = len(self.changelog.heads())
1813 newheads = len(self.changelog.heads())
1801 heads = ""
1814 heads = ""
1802 if oldheads and newheads != oldheads:
1815 if oldheads and newheads != oldheads:
1803 heads = _(" (%+d heads)") % (newheads - oldheads)
1816 heads = _(" (%+d heads)") % (newheads - oldheads)
1804
1817
1805 self.ui.status(_("added %d changesets"
1818 self.ui.status(_("added %d changesets"
1806 " with %d changes to %d files%s\n")
1819 " with %d changes to %d files%s\n")
1807 % (changesets, revisions, files, heads))
1820 % (changesets, revisions, files, heads))
1808
1821
1809 if changesets > 0:
1822 if changesets > 0:
1810 self.hook('pretxnchangegroup', throw=True,
1823 self.hook('pretxnchangegroup', throw=True,
1811 node=hex(self.changelog.node(cor+1)), source=srctype,
1824 node=hex(self.changelog.node(cor+1)), source=srctype,
1812 url=url)
1825 url=url)
1813
1826
1814 tr.close()
1827 tr.close()
1815
1828
1816 if changesets > 0:
1829 if changesets > 0:
1817 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1830 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1818 source=srctype, url=url)
1831 source=srctype, url=url)
1819
1832
1820 for i in xrange(cor + 1, cnr + 1):
1833 for i in xrange(cor + 1, cnr + 1):
1821 self.hook("incoming", node=hex(self.changelog.node(i)),
1834 self.hook("incoming", node=hex(self.changelog.node(i)),
1822 source=srctype, url=url)
1835 source=srctype, url=url)
1823
1836
1824 # never return 0 here:
1837 # never return 0 here:
1825 if newheads < oldheads:
1838 if newheads < oldheads:
1826 return newheads - oldheads - 1
1839 return newheads - oldheads - 1
1827 else:
1840 else:
1828 return newheads - oldheads + 1
1841 return newheads - oldheads + 1
1829
1842
1830
1843
1831 def stream_in(self, remote):
1844 def stream_in(self, remote):
1832 fp = remote.stream_out()
1845 fp = remote.stream_out()
1833 l = fp.readline()
1846 l = fp.readline()
1834 try:
1847 try:
1835 resp = int(l)
1848 resp = int(l)
1836 except ValueError:
1849 except ValueError:
1837 raise util.UnexpectedOutput(
1850 raise util.UnexpectedOutput(
1838 _('Unexpected response from remote server:'), l)
1851 _('Unexpected response from remote server:'), l)
1839 if resp == 1:
1852 if resp == 1:
1840 raise util.Abort(_('operation forbidden by server'))
1853 raise util.Abort(_('operation forbidden by server'))
1841 elif resp == 2:
1854 elif resp == 2:
1842 raise util.Abort(_('locking the remote repository failed'))
1855 raise util.Abort(_('locking the remote repository failed'))
1843 elif resp != 0:
1856 elif resp != 0:
1844 raise util.Abort(_('the server sent an unknown error code'))
1857 raise util.Abort(_('the server sent an unknown error code'))
1845 self.ui.status(_('streaming all changes\n'))
1858 self.ui.status(_('streaming all changes\n'))
1846 l = fp.readline()
1859 l = fp.readline()
1847 try:
1860 try:
1848 total_files, total_bytes = map(int, l.split(' ', 1))
1861 total_files, total_bytes = map(int, l.split(' ', 1))
1849 except ValueError, TypeError:
1862 except ValueError, TypeError:
1850 raise util.UnexpectedOutput(
1863 raise util.UnexpectedOutput(
1851 _('Unexpected response from remote server:'), l)
1864 _('Unexpected response from remote server:'), l)
1852 self.ui.status(_('%d files to transfer, %s of data\n') %
1865 self.ui.status(_('%d files to transfer, %s of data\n') %
1853 (total_files, util.bytecount(total_bytes)))
1866 (total_files, util.bytecount(total_bytes)))
1854 start = time.time()
1867 start = time.time()
1855 for i in xrange(total_files):
1868 for i in xrange(total_files):
1856 # XXX doesn't support '\n' or '\r' in filenames
1869 # XXX doesn't support '\n' or '\r' in filenames
1857 l = fp.readline()
1870 l = fp.readline()
1858 try:
1871 try:
1859 name, size = l.split('\0', 1)
1872 name, size = l.split('\0', 1)
1860 size = int(size)
1873 size = int(size)
1861 except ValueError, TypeError:
1874 except ValueError, TypeError:
1862 raise util.UnexpectedOutput(
1875 raise util.UnexpectedOutput(
1863 _('Unexpected response from remote server:'), l)
1876 _('Unexpected response from remote server:'), l)
1864 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1877 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1865 ofp = self.sopener(name, 'w')
1878 ofp = self.sopener(name, 'w')
1866 for chunk in util.filechunkiter(fp, limit=size):
1879 for chunk in util.filechunkiter(fp, limit=size):
1867 ofp.write(chunk)
1880 ofp.write(chunk)
1868 ofp.close()
1881 ofp.close()
1869 elapsed = time.time() - start
1882 elapsed = time.time() - start
1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1883 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1871 (util.bytecount(total_bytes), elapsed,
1884 (util.bytecount(total_bytes), elapsed,
1872 util.bytecount(total_bytes / elapsed)))
1885 util.bytecount(total_bytes / elapsed)))
1873 self.reload()
1886 self.reload()
1874 return len(self.heads()) + 1
1887 return len(self.heads()) + 1
1875
1888
1876 def clone(self, remote, heads=[], stream=False):
1889 def clone(self, remote, heads=[], stream=False):
1877 '''clone remote repository.
1890 '''clone remote repository.
1878
1891
1879 keyword arguments:
1892 keyword arguments:
1880 heads: list of revs to clone (forces use of pull)
1893 heads: list of revs to clone (forces use of pull)
1881 stream: use streaming clone if possible'''
1894 stream: use streaming clone if possible'''
1882
1895
1883 # now, all clients that can request uncompressed clones can
1896 # now, all clients that can request uncompressed clones can
1884 # read repo formats supported by all servers that can serve
1897 # read repo formats supported by all servers that can serve
1885 # them.
1898 # them.
1886
1899
1887 # if revlog format changes, client will have to check version
1900 # if revlog format changes, client will have to check version
1888 # and format flags on "stream" capability, and use
1901 # and format flags on "stream" capability, and use
1889 # uncompressed only if compatible.
1902 # uncompressed only if compatible.
1890
1903
1891 if stream and not heads and remote.capable('stream'):
1904 if stream and not heads and remote.capable('stream'):
1892 return self.stream_in(remote)
1905 return self.stream_in(remote)
1893 return self.pull(remote, heads)
1906 return self.pull(remote, heads)
1894
1907
1895 # used to avoid circular references so destructors work
1908 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the queued (src, dest) renames.

    The rename list is copied into plain tuples and captured by a plain
    function, keeping the closure free of repository references.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return run_renames
1902
1915
def instance(ui, path, create):
    """Open (or create, if requested) the local repository at path."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1905
1918
def islocal(path):
    """A localrepository path is always local, by definition."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now