small fixes for the parent patch...
Alexis S. L. Carvalho
r4166:c0271aba default
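The fixes, relative to the parent patch that introduced the "store" repository layout: requirements is now built as a list instead of a tuple (the old fallback `requirements = ("revlogv1")` binds a plain string, not a one-element tuple, so the requires file was written one character per line); the option is read with ui.configbool() instead of ui.config(), so an hgrc setting such as `usestore = false` is actually treated as false rather than as a non-empty, truthy string; and the dummy 00changelog.i is now only created in the store case, where the real changelog lives under .hg/store and the top-level file is just a decoy to keep old clients from misreading the repository. A minimal sketch of the tuple pitfall, not part of the patch itself:

    # ("revlogv1") is a parenthesized string; a one-element tuple needs a
    # trailing comma: ("revlogv1",). Iterating the string yields characters.
    for r in ("revlogv1"):
        print r            # prints r, e, v, l, o, g, v, 1 (one per line)
    for r in ["revlogv1"]:
        print r            # prints revlogv1
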
@@ -1,1933 +1,1932 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import *
 from i18n import _
 import repo, appendfile, changegroup
 import changelog, dirstate, filelog, manifest, context
 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
 import os, revlog, time, util

 class localrepository(repo.repository):
     capabilities = ('lookup', 'changegroupsubset')
     supported = ('revlogv1', 'store')

     def __del__(self):
         self.transhandle = None
     def __init__(self, parentui, path=None, create=0):
         repo.repository.__init__(self)
         if not path:
             p = os.getcwd()
             while not os.path.isdir(os.path.join(p, ".hg")):
                 oldp = p
                 p = os.path.dirname(p)
                 if p == oldp:
                     raise repo.RepoError(_("There is no Mercurial repository"
                                            " here (.hg not found)"))
             path = p

         self.path = os.path.join(path, ".hg")
         self.root = os.path.realpath(path)
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
-                if parentui.config('format', 'usestore', 1):
+                requirements = ["revlogv1"]
+                if parentui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
-                    requirements = ("revlogv1", "store")
-                else:
-                    requirements = ("revlogv1")
+                    requirements.append("store")
+                    # create an invalid changelog
+                    self.opener("00changelog.i", "a").write(
+                        '\0\0\0\2' # represents revlogv2
+                        ' dummy changelog to prevent using the old repo layout'
+                    )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
-                # create an invalid changelog
-                self.opener("00changelog.i", "a").write(
-                    '\0\0\0\2' # represents revlogv2
-                    ' dummy changelog to prevent using the old repo layout'
-                )
             else:
59 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
60 elif create:
59 elif create:
61 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
62 else:
61 else:
63 # find requirements
62 # find requirements
64 try:
63 try:
65 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
66 except IOError, inst:
65 except IOError, inst:
67 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
68 raise
67 raise
69 requirements = []
68 requirements = []
70 # check them
69 # check them
71 for r in requirements:
70 for r in requirements:
72 if r not in self.supported:
71 if r not in self.supported:
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
74
73
75 # setup store
74 # setup store
76 if "store" in requirements:
75 if "store" in requirements:
77 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
78 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
79 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
80 else:
79 else:
81 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
82 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
83 self.spath = self.path
82 self.spath = self.path
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
85
84
86 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
87 try:
86 try:
88 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
89 except IOError:
88 except IOError:
90 pass
89 pass
91
90
92 v = self.ui.configrevlog()
91 v = self.ui.configrevlog()
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
95 fl = v.get('flags', None)
94 fl = v.get('flags', None)
96 flags = 0
95 flags = 0
97 if fl != None:
96 if fl != None:
98 for x in fl.split():
97 for x in fl.split():
99 flags |= revlog.flagstr(x)
98 flags |= revlog.flagstr(x)
100 elif self.revlogv1:
99 elif self.revlogv1:
101 flags = revlog.REVLOG_DEFAULT_FLAGS
100 flags = revlog.REVLOG_DEFAULT_FLAGS
102
101
103 v = self.revlogversion | flags
102 v = self.revlogversion | flags
104 self.manifest = manifest.manifest(self.sopener, v)
103 self.manifest = manifest.manifest(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
106
105
107 fallback = self.ui.config('ui', 'fallbackencoding')
106 fallback = self.ui.config('ui', 'fallbackencoding')
108 if fallback:
107 if fallback:
109 util._fallbackencoding = fallback
108 util._fallbackencoding = fallback
110
109
111 # the changelog might not have the inline index flag
110 # the changelog might not have the inline index flag
112 # on. If the format of the changelog is the same as found in
111 # on. If the format of the changelog is the same as found in
113 # .hgrc, apply any flags found in the .hgrc as well.
112 # .hgrc, apply any flags found in the .hgrc as well.
114 # Otherwise, just version from the changelog
113 # Otherwise, just version from the changelog
115 v = self.changelog.version
114 v = self.changelog.version
116 if v == self.revlogversion:
115 if v == self.revlogversion:
117 v |= flags
116 v |= flags
118 self.revlogversion = v
117 self.revlogversion = v
119
118
120 self.tagscache = None
119 self.tagscache = None
121 self.branchcache = None
120 self.branchcache = None
122 self.nodetagscache = None
121 self.nodetagscache = None
123 self.filterpats = {}
122 self.filterpats = {}
124 self.transhandle = None
123 self.transhandle = None
125
124
126 self._link = lambda x: False
125 self._link = lambda x: False
127 if util.checklink(self.root):
126 if util.checklink(self.root):
128 r = self.root # avoid circular reference in lambda
127 r = self.root # avoid circular reference in lambda
129 self._link = lambda x: util.is_link(os.path.join(r, x))
128 self._link = lambda x: util.is_link(os.path.join(r, x))
130
129
131 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
130 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
132
131
133 def url(self):
132 def url(self):
134 return 'file:' + self.root
133 return 'file:' + self.root
135
134
136 def hook(self, name, throw=False, **args):
135 def hook(self, name, throw=False, **args):
137 def callhook(hname, funcname):
136 def callhook(hname, funcname):
138 '''call python hook. hook is callable object, looked up as
137 '''call python hook. hook is callable object, looked up as
139 name in python module. if callable returns "true", hook
138 name in python module. if callable returns "true", hook
140 fails, else passes. if hook raises exception, treated as
139 fails, else passes. if hook raises exception, treated as
141 hook failure. exception propagates if throw is "true".
140 hook failure. exception propagates if throw is "true".
142
141
143 reason for "true" meaning "hook failed" is so that
142 reason for "true" meaning "hook failed" is so that
144 unmodified commands (e.g. mercurial.commands.update) can
143 unmodified commands (e.g. mercurial.commands.update) can
145 be run as hooks without wrappers to convert return values.'''
144 be run as hooks without wrappers to convert return values.'''
146
145
147 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
146 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
148 obj = funcname
147 obj = funcname
149 if not callable(obj):
148 if not callable(obj):
150 d = funcname.rfind('.')
149 d = funcname.rfind('.')
151 if d == -1:
150 if d == -1:
152 raise util.Abort(_('%s hook is invalid ("%s" not in '
151 raise util.Abort(_('%s hook is invalid ("%s" not in '
153 'a module)') % (hname, funcname))
152 'a module)') % (hname, funcname))
154 modname = funcname[:d]
153 modname = funcname[:d]
155 try:
154 try:
156 obj = __import__(modname)
155 obj = __import__(modname)
157 except ImportError:
156 except ImportError:
158 try:
157 try:
159 # extensions are loaded with hgext_ prefix
158 # extensions are loaded with hgext_ prefix
160 obj = __import__("hgext_%s" % modname)
159 obj = __import__("hgext_%s" % modname)
161 except ImportError:
160 except ImportError:
162 raise util.Abort(_('%s hook is invalid '
161 raise util.Abort(_('%s hook is invalid '
163 '(import of "%s" failed)') %
162 '(import of "%s" failed)') %
164 (hname, modname))
163 (hname, modname))
165 try:
164 try:
166 for p in funcname.split('.')[1:]:
165 for p in funcname.split('.')[1:]:
167 obj = getattr(obj, p)
166 obj = getattr(obj, p)
168 except AttributeError, err:
167 except AttributeError, err:
169 raise util.Abort(_('%s hook is invalid '
168 raise util.Abort(_('%s hook is invalid '
170 '("%s" is not defined)') %
169 '("%s" is not defined)') %
171 (hname, funcname))
170 (hname, funcname))
172 if not callable(obj):
171 if not callable(obj):
173 raise util.Abort(_('%s hook is invalid '
172 raise util.Abort(_('%s hook is invalid '
174 '("%s" is not callable)') %
173 '("%s" is not callable)') %
175 (hname, funcname))
174 (hname, funcname))
176 try:
175 try:
177 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
176 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
178 except (KeyboardInterrupt, util.SignalInterrupt):
177 except (KeyboardInterrupt, util.SignalInterrupt):
179 raise
178 raise
180 except Exception, exc:
179 except Exception, exc:
181 if isinstance(exc, util.Abort):
180 if isinstance(exc, util.Abort):
182 self.ui.warn(_('error: %s hook failed: %s\n') %
181 self.ui.warn(_('error: %s hook failed: %s\n') %
183 (hname, exc.args[0]))
182 (hname, exc.args[0]))
184 else:
183 else:
185 self.ui.warn(_('error: %s hook raised an exception: '
184 self.ui.warn(_('error: %s hook raised an exception: '
186 '%s\n') % (hname, exc))
185 '%s\n') % (hname, exc))
187 if throw:
186 if throw:
188 raise
187 raise
189 self.ui.print_exc()
188 self.ui.print_exc()
190 return True
189 return True
191 if r:
190 if r:
192 if throw:
191 if throw:
193 raise util.Abort(_('%s hook failed') % hname)
192 raise util.Abort(_('%s hook failed') % hname)
194 self.ui.warn(_('warning: %s hook failed\n') % hname)
193 self.ui.warn(_('warning: %s hook failed\n') % hname)
195 return r
194 return r
196
195
197 def runhook(name, cmd):
196 def runhook(name, cmd):
198 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
197 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
199 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
198 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
200 r = util.system(cmd, environ=env, cwd=self.root)
199 r = util.system(cmd, environ=env, cwd=self.root)
201 if r:
200 if r:
202 desc, r = util.explain_exit(r)
201 desc, r = util.explain_exit(r)
203 if throw:
202 if throw:
204 raise util.Abort(_('%s hook %s') % (name, desc))
203 raise util.Abort(_('%s hook %s') % (name, desc))
205 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
204 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
206 return r
205 return r
207
206
208 r = False
207 r = False
209 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
208 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
210 if hname.split(".", 1)[0] == name and cmd]
209 if hname.split(".", 1)[0] == name and cmd]
211 hooks.sort()
210 hooks.sort()
212 for hname, cmd in hooks:
211 for hname, cmd in hooks:
213 if callable(cmd):
212 if callable(cmd):
214 r = callhook(hname, cmd) or r
213 r = callhook(hname, cmd) or r
215 elif cmd.startswith('python:'):
214 elif cmd.startswith('python:'):
216 r = callhook(hname, cmd[7:].strip()) or r
215 r = callhook(hname, cmd[7:].strip()) or r
217 else:
216 else:
218 r = runhook(hname, cmd) or r
217 r = runhook(hname, cmd) or r
219 return r
218 return r
220
219
221 tag_disallowed = ':\r\n'
220 tag_disallowed = ':\r\n'
222
221
223 def _tag(self, name, node, message, local, user, date, parent=None):
222 def _tag(self, name, node, message, local, user, date, parent=None):
224 use_dirstate = parent is None
223 use_dirstate = parent is None
225
224
226 for c in self.tag_disallowed:
225 for c in self.tag_disallowed:
227 if c in name:
226 if c in name:
228 raise util.Abort(_('%r cannot be used in a tag name') % c)
227 raise util.Abort(_('%r cannot be used in a tag name') % c)
229
228
230 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
229 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
231
230
232 if local:
231 if local:
233 # local tags are stored in the current charset
232 # local tags are stored in the current charset
234 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
233 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
235 self.hook('tag', node=hex(node), tag=name, local=local)
234 self.hook('tag', node=hex(node), tag=name, local=local)
236 return
235 return
237
236
238 # committed tags are stored in UTF-8
237 # committed tags are stored in UTF-8
239 line = '%s %s\n' % (hex(node), util.fromlocal(name))
238 line = '%s %s\n' % (hex(node), util.fromlocal(name))
240 if use_dirstate:
239 if use_dirstate:
241 self.wfile('.hgtags', 'ab').write(line)
240 self.wfile('.hgtags', 'ab').write(line)
242 else:
241 else:
243 ntags = self.filectx('.hgtags', parent).data()
242 ntags = self.filectx('.hgtags', parent).data()
244 self.wfile('.hgtags', 'ab').write(ntags + line)
243 self.wfile('.hgtags', 'ab').write(ntags + line)
245 if use_dirstate and self.dirstate.state('.hgtags') == '?':
244 if use_dirstate and self.dirstate.state('.hgtags') == '?':
246 self.add(['.hgtags'])
245 self.add(['.hgtags'])
247
246
248 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
247 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
249
248
250 self.hook('tag', node=hex(node), tag=name, local=local)
249 self.hook('tag', node=hex(node), tag=name, local=local)
251
250
252 return tagnode
251 return tagnode
253
252
254 def tag(self, name, node, message, local, user, date):
253 def tag(self, name, node, message, local, user, date):
255 '''tag a revision with a symbolic name.
254 '''tag a revision with a symbolic name.
256
255
257 if local is True, the tag is stored in a per-repository file.
256 if local is True, the tag is stored in a per-repository file.
258 otherwise, it is stored in the .hgtags file, and a new
257 otherwise, it is stored in the .hgtags file, and a new
259 changeset is committed with the change.
258 changeset is committed with the change.
260
259
261 keyword arguments:
260 keyword arguments:
262
261
263 local: whether to store tag in non-version-controlled file
262 local: whether to store tag in non-version-controlled file
264 (default False)
263 (default False)
265
264
266 message: commit message to use if committing
265 message: commit message to use if committing
267
266
268 user: name of user to use if committing
267 user: name of user to use if committing
269
268
270 date: date tuple to use if committing'''
269 date: date tuple to use if committing'''
271
270
272 for x in self.status()[:5]:
271 for x in self.status()[:5]:
273 if '.hgtags' in x:
272 if '.hgtags' in x:
274 raise util.Abort(_('working copy of .hgtags is changed '
273 raise util.Abort(_('working copy of .hgtags is changed '
275 '(please commit .hgtags manually)'))
274 '(please commit .hgtags manually)'))
276
275
277
276
278 self._tag(name, node, message, local, user, date)
277 self._tag(name, node, message, local, user, date)
279
278
280 def tags(self):
279 def tags(self):
281 '''return a mapping of tag to node'''
280 '''return a mapping of tag to node'''
282 if not self.tagscache:
281 if not self.tagscache:
283 self.tagscache = {}
282 self.tagscache = {}
284
283
285 def parsetag(line, context):
284 def parsetag(line, context):
286 if not line:
285 if not line:
287 return
286 return
288 s = l.split(" ", 1)
287 s = l.split(" ", 1)
289 if len(s) != 2:
288 if len(s) != 2:
290 self.ui.warn(_("%s: cannot parse entry\n") % context)
289 self.ui.warn(_("%s: cannot parse entry\n") % context)
291 return
290 return
292 node, key = s
291 node, key = s
293 key = util.tolocal(key.strip()) # stored in UTF-8
292 key = util.tolocal(key.strip()) # stored in UTF-8
294 try:
293 try:
295 bin_n = bin(node)
294 bin_n = bin(node)
296 except TypeError:
295 except TypeError:
297 self.ui.warn(_("%s: node '%s' is not well formed\n") %
296 self.ui.warn(_("%s: node '%s' is not well formed\n") %
298 (context, node))
297 (context, node))
299 return
298 return
300 if bin_n not in self.changelog.nodemap:
299 if bin_n not in self.changelog.nodemap:
301 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
300 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
302 (context, key))
301 (context, key))
303 return
302 return
304 self.tagscache[key] = bin_n
303 self.tagscache[key] = bin_n
305
304
306 # read the tags file from each head, ending with the tip,
305 # read the tags file from each head, ending with the tip,
307 # and add each tag found to the map, with "newer" ones
306 # and add each tag found to the map, with "newer" ones
308 # taking precedence
307 # taking precedence
309 f = None
308 f = None
310 for rev, node, fnode in self._hgtagsnodes():
309 for rev, node, fnode in self._hgtagsnodes():
311 f = (f and f.filectx(fnode) or
310 f = (f and f.filectx(fnode) or
312 self.filectx('.hgtags', fileid=fnode))
311 self.filectx('.hgtags', fileid=fnode))
313 count = 0
312 count = 0
314 for l in f.data().splitlines():
313 for l in f.data().splitlines():
315 count += 1
314 count += 1
316 parsetag(l, _("%s, line %d") % (str(f), count))
315 parsetag(l, _("%s, line %d") % (str(f), count))
317
316
318 try:
317 try:
319 f = self.opener("localtags")
318 f = self.opener("localtags")
320 count = 0
319 count = 0
321 for l in f:
320 for l in f:
322 # localtags are stored in the local character set
321 # localtags are stored in the local character set
323 # while the internal tag table is stored in UTF-8
322 # while the internal tag table is stored in UTF-8
324 l = util.fromlocal(l)
323 l = util.fromlocal(l)
325 count += 1
324 count += 1
326 parsetag(l, _("localtags, line %d") % count)
325 parsetag(l, _("localtags, line %d") % count)
327 except IOError:
326 except IOError:
328 pass
327 pass
329
328
330 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
331
330
332 return self.tagscache
331 return self.tagscache
333
332
334 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
335 heads = self.heads()
334 heads = self.heads()
336 heads.reverse()
335 heads.reverse()
337 last = {}
336 last = {}
338 ret = []
337 ret = []
339 for node in heads:
338 for node in heads:
340 c = self.changectx(node)
339 c = self.changectx(node)
341 rev = c.rev()
340 rev = c.rev()
342 try:
341 try:
343 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
344 except revlog.LookupError:
343 except revlog.LookupError:
345 continue
344 continue
346 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
347 if fnode in last:
346 if fnode in last:
348 ret[last[fnode]] = None
347 ret[last[fnode]] = None
349 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
350 return [item for item in ret if item]
349 return [item for item in ret if item]
351
350
352 def tagslist(self):
351 def tagslist(self):
353 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
354 l = []
353 l = []
355 for t, n in self.tags().items():
354 for t, n in self.tags().items():
356 try:
355 try:
357 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
358 except:
357 except:
359 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
360 l.append((r, t, n))
359 l.append((r, t, n))
361 l.sort()
360 l.sort()
362 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
363
362
364 def nodetags(self, node):
363 def nodetags(self, node):
365 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
366 if not self.nodetagscache:
365 if not self.nodetagscache:
367 self.nodetagscache = {}
366 self.nodetagscache = {}
368 for t, n in self.tags().items():
367 for t, n in self.tags().items():
369 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
370 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
371
370
372 def _branchtags(self):
371 def _branchtags(self):
373 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
374
373
375 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
376 if lrev != tiprev:
375 if lrev != tiprev:
377 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
378 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
379
378
380 return partial
379 return partial
381
380
382 def branchtags(self):
381 def branchtags(self):
383 if self.branchcache is not None:
382 if self.branchcache is not None:
384 return self.branchcache
383 return self.branchcache
385
384
386 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
387 partial = self._branchtags()
386 partial = self._branchtags()
388
387
389 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
390 # charset internally
389 # charset internally
391 for k, v in partial.items():
390 for k, v in partial.items():
392 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
393 return self.branchcache
392 return self.branchcache
394
393
395 def _readbranchcache(self):
394 def _readbranchcache(self):
396 partial = {}
395 partial = {}
397 try:
396 try:
398 f = self.opener("branches.cache")
397 f = self.opener("branches.cache")
399 lines = f.read().split('\n')
398 lines = f.read().split('\n')
400 f.close()
399 f.close()
401 last, lrev = lines.pop(0).rstrip().split(" ", 1)
400 last, lrev = lines.pop(0).rstrip().split(" ", 1)
402 last, lrev = bin(last), int(lrev)
401 last, lrev = bin(last), int(lrev)
403 if not (lrev < self.changelog.count() and
402 if not (lrev < self.changelog.count() and
404 self.changelog.node(lrev) == last): # sanity check
403 self.changelog.node(lrev) == last): # sanity check
405 # invalidate the cache
404 # invalidate the cache
406 raise ValueError('Invalid branch cache: unknown tip')
405 raise ValueError('Invalid branch cache: unknown tip')
407 for l in lines:
406 for l in lines:
408 if not l: continue
407 if not l: continue
409 node, label = l.rstrip().split(" ", 1)
408 node, label = l.rstrip().split(" ", 1)
410 partial[label] = bin(node)
409 partial[label] = bin(node)
411 except (KeyboardInterrupt, util.SignalInterrupt):
410 except (KeyboardInterrupt, util.SignalInterrupt):
412 raise
411 raise
413 except Exception, inst:
412 except Exception, inst:
414 if self.ui.debugflag:
413 if self.ui.debugflag:
415 self.ui.warn(str(inst), '\n')
414 self.ui.warn(str(inst), '\n')
416 partial, last, lrev = {}, nullid, nullrev
415 partial, last, lrev = {}, nullid, nullrev
417 return partial, last, lrev
416 return partial, last, lrev
418
417
419 def _writebranchcache(self, branches, tip, tiprev):
418 def _writebranchcache(self, branches, tip, tiprev):
420 try:
419 try:
421 f = self.opener("branches.cache", "w")
420 f = self.opener("branches.cache", "w")
422 f.write("%s %s\n" % (hex(tip), tiprev))
421 f.write("%s %s\n" % (hex(tip), tiprev))
423 for label, node in branches.iteritems():
422 for label, node in branches.iteritems():
424 f.write("%s %s\n" % (hex(node), label))
423 f.write("%s %s\n" % (hex(node), label))
425 except IOError:
424 except IOError:
426 pass
425 pass
427
426
428 def _updatebranchcache(self, partial, start, end):
427 def _updatebranchcache(self, partial, start, end):
429 for r in xrange(start, end):
428 for r in xrange(start, end):
430 c = self.changectx(r)
429 c = self.changectx(r)
431 b = c.branch()
430 b = c.branch()
432 if b:
431 if b:
433 partial[b] = c.node()
432 partial[b] = c.node()
434
433
435 def lookup(self, key):
434 def lookup(self, key):
436 if key == '.':
435 if key == '.':
437 key = self.dirstate.parents()[0]
436 key = self.dirstate.parents()[0]
438 if key == nullid:
437 if key == nullid:
439 raise repo.RepoError(_("no revision checked out"))
438 raise repo.RepoError(_("no revision checked out"))
440 elif key == 'null':
439 elif key == 'null':
441 return nullid
440 return nullid
442 n = self.changelog._match(key)
441 n = self.changelog._match(key)
443 if n:
442 if n:
444 return n
443 return n
445 if key in self.tags():
444 if key in self.tags():
446 return self.tags()[key]
445 return self.tags()[key]
447 if key in self.branchtags():
446 if key in self.branchtags():
448 return self.branchtags()[key]
447 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
448 n = self.changelog._partialmatch(key)
450 if n:
449 if n:
451 return n
450 return n
452 raise repo.RepoError(_("unknown revision '%s'") % key)
451 raise repo.RepoError(_("unknown revision '%s'") % key)
453
452
454 def dev(self):
453 def dev(self):
455 return os.lstat(self.path).st_dev
454 return os.lstat(self.path).st_dev
456
455
457 def local(self):
456 def local(self):
458 return True
457 return True
459
458
460 def join(self, f):
459 def join(self, f):
461 return os.path.join(self.path, f)
460 return os.path.join(self.path, f)
462
461
463 def sjoin(self, f):
462 def sjoin(self, f):
464 f = self.encodefn(f)
463 f = self.encodefn(f)
465 return os.path.join(self.spath, f)
464 return os.path.join(self.spath, f)
466
465
467 def wjoin(self, f):
466 def wjoin(self, f):
468 return os.path.join(self.root, f)
467 return os.path.join(self.root, f)
469
468
470 def file(self, f):
469 def file(self, f):
471 if f[0] == '/':
470 if f[0] == '/':
472 f = f[1:]
471 f = f[1:]
473 return filelog.filelog(self.sopener, f, self.revlogversion)
472 return filelog.filelog(self.sopener, f, self.revlogversion)
474
473
475 def changectx(self, changeid=None):
474 def changectx(self, changeid=None):
476 return context.changectx(self, changeid)
475 return context.changectx(self, changeid)
477
476
478 def workingctx(self):
477 def workingctx(self):
479 return context.workingctx(self)
478 return context.workingctx(self)
480
479
481 def parents(self, changeid=None):
480 def parents(self, changeid=None):
482 '''
481 '''
483 get list of changectxs for parents of changeid or working directory
482 get list of changectxs for parents of changeid or working directory
484 '''
483 '''
485 if changeid is None:
484 if changeid is None:
486 pl = self.dirstate.parents()
485 pl = self.dirstate.parents()
487 else:
486 else:
488 n = self.changelog.lookup(changeid)
487 n = self.changelog.lookup(changeid)
489 pl = self.changelog.parents(n)
488 pl = self.changelog.parents(n)
490 if pl[1] == nullid:
489 if pl[1] == nullid:
491 return [self.changectx(pl[0])]
490 return [self.changectx(pl[0])]
492 return [self.changectx(pl[0]), self.changectx(pl[1])]
491 return [self.changectx(pl[0]), self.changectx(pl[1])]
493
492
494 def filectx(self, path, changeid=None, fileid=None):
493 def filectx(self, path, changeid=None, fileid=None):
495 """changeid can be a changeset revision, node, or tag.
494 """changeid can be a changeset revision, node, or tag.
496 fileid can be a file revision or node."""
495 fileid can be a file revision or node."""
497 return context.filectx(self, path, changeid, fileid)
496 return context.filectx(self, path, changeid, fileid)
498
497
499 def getcwd(self):
498 def getcwd(self):
500 return self.dirstate.getcwd()
499 return self.dirstate.getcwd()
501
500
502 def wfile(self, f, mode='r'):
501 def wfile(self, f, mode='r'):
503 return self.wopener(f, mode)
502 return self.wopener(f, mode)
504
503
505 def _filter(self, filter, filename, data):
504 def _filter(self, filter, filename, data):
506 if filter not in self.filterpats:
505 if filter not in self.filterpats:
507 l = []
506 l = []
508 for pat, cmd in self.ui.configitems(filter):
507 for pat, cmd in self.ui.configitems(filter):
509 mf = util.matcher(self.root, "", [pat], [], [])[1]
508 mf = util.matcher(self.root, "", [pat], [], [])[1]
510 l.append((mf, cmd))
509 l.append((mf, cmd))
511 self.filterpats[filter] = l
510 self.filterpats[filter] = l
512
511
513 for mf, cmd in self.filterpats[filter]:
512 for mf, cmd in self.filterpats[filter]:
514 if mf(filename):
513 if mf(filename):
515 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
514 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
516 data = util.filter(data, cmd)
515 data = util.filter(data, cmd)
517 break
516 break
518
517
519 return data
518 return data
520
519
521 def wread(self, filename):
520 def wread(self, filename):
522 if self._link(filename):
521 if self._link(filename):
523 data = os.readlink(self.wjoin(filename))
522 data = os.readlink(self.wjoin(filename))
524 else:
523 else:
525 data = self.wopener(filename, 'r').read()
524 data = self.wopener(filename, 'r').read()
526 return self._filter("encode", filename, data)
525 return self._filter("encode", filename, data)
527
526
528 def wwrite(self, filename, data, flags):
527 def wwrite(self, filename, data, flags):
529 data = self._filter("decode", filename, data)
528 data = self._filter("decode", filename, data)
530 if "l" in flags:
529 if "l" in flags:
531 f = self.wjoin(filename)
530 f = self.wjoin(filename)
532 try:
531 try:
533 os.unlink(f)
532 os.unlink(f)
534 except OSError:
533 except OSError:
535 pass
534 pass
536 d = os.path.dirname(f)
535 d = os.path.dirname(f)
537 if not os.path.exists(d):
536 if not os.path.exists(d):
538 os.makedirs(d)
537 os.makedirs(d)
539 os.symlink(data, f)
538 os.symlink(data, f)
540 else:
539 else:
541 try:
540 try:
542 if self._link(filename):
541 if self._link(filename):
543 os.unlink(self.wjoin(filename))
542 os.unlink(self.wjoin(filename))
544 except OSError:
543 except OSError:
545 pass
544 pass
546 self.wopener(filename, 'w').write(data)
545 self.wopener(filename, 'w').write(data)
547 util.set_exec(self.wjoin(filename), "x" in flags)
546 util.set_exec(self.wjoin(filename), "x" in flags)
548
547
549 def wwritedata(self, filename, data):
548 def wwritedata(self, filename, data):
550 return self._filter("decode", filename, data)
549 return self._filter("decode", filename, data)
551
550
552 def transaction(self):
551 def transaction(self):
553 tr = self.transhandle
552 tr = self.transhandle
554 if tr != None and tr.running():
553 if tr != None and tr.running():
555 return tr.nest()
554 return tr.nest()
556
555
557 # save dirstate for rollback
556 # save dirstate for rollback
558 try:
557 try:
559 ds = self.opener("dirstate").read()
558 ds = self.opener("dirstate").read()
560 except IOError:
559 except IOError:
561 ds = ""
560 ds = ""
562 self.opener("journal.dirstate", "w").write(ds)
561 self.opener("journal.dirstate", "w").write(ds)
563
562
564 renames = [(self.sjoin("journal"), self.sjoin("undo")),
563 renames = [(self.sjoin("journal"), self.sjoin("undo")),
565 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
564 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
566 tr = transaction.transaction(self.ui.warn, self.sopener,
565 tr = transaction.transaction(self.ui.warn, self.sopener,
567 self.sjoin("journal"),
566 self.sjoin("journal"),
568 aftertrans(renames))
567 aftertrans(renames))
569 self.transhandle = tr
568 self.transhandle = tr
570 return tr
569 return tr
571
570
572 def recover(self):
571 def recover(self):
573 l = self.lock()
572 l = self.lock()
574 if os.path.exists(self.sjoin("journal")):
573 if os.path.exists(self.sjoin("journal")):
575 self.ui.status(_("rolling back interrupted transaction\n"))
574 self.ui.status(_("rolling back interrupted transaction\n"))
576 transaction.rollback(self.sopener, self.sjoin("journal"))
575 transaction.rollback(self.sopener, self.sjoin("journal"))
577 self.reload()
576 self.reload()
578 return True
577 return True
579 else:
578 else:
580 self.ui.warn(_("no interrupted transaction available\n"))
579 self.ui.warn(_("no interrupted transaction available\n"))
581 return False
580 return False
582
581
583 def rollback(self, wlock=None):
582 def rollback(self, wlock=None):
584 if not wlock:
583 if not wlock:
585 wlock = self.wlock()
584 wlock = self.wlock()
586 l = self.lock()
585 l = self.lock()
587 if os.path.exists(self.sjoin("undo")):
586 if os.path.exists(self.sjoin("undo")):
588 self.ui.status(_("rolling back last transaction\n"))
587 self.ui.status(_("rolling back last transaction\n"))
589 transaction.rollback(self.sopener, self.sjoin("undo"))
588 transaction.rollback(self.sopener, self.sjoin("undo"))
590 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
589 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
591 self.reload()
590 self.reload()
592 self.wreload()
591 self.wreload()
593 else:
592 else:
594 self.ui.warn(_("no rollback information available\n"))
593 self.ui.warn(_("no rollback information available\n"))
595
594
596 def wreload(self):
595 def wreload(self):
597 self.dirstate.read()
596 self.dirstate.read()
598
597
599 def reload(self):
598 def reload(self):
600 self.changelog.load()
599 self.changelog.load()
601 self.manifest.load()
600 self.manifest.load()
602 self.tagscache = None
601 self.tagscache = None
603 self.nodetagscache = None
602 self.nodetagscache = None
604
603
605 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
604 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
606 desc=None):
605 desc=None):
607 try:
606 try:
608 l = lock.lock(lockname, 0, releasefn, desc=desc)
607 l = lock.lock(lockname, 0, releasefn, desc=desc)
609 except lock.LockHeld, inst:
608 except lock.LockHeld, inst:
610 if not wait:
609 if not wait:
611 raise
610 raise
612 self.ui.warn(_("waiting for lock on %s held by %r\n") %
611 self.ui.warn(_("waiting for lock on %s held by %r\n") %
613 (desc, inst.locker))
612 (desc, inst.locker))
614 # default to 600 seconds timeout
613 # default to 600 seconds timeout
615 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
614 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
616 releasefn, desc=desc)
615 releasefn, desc=desc)
617 if acquirefn:
616 if acquirefn:
618 acquirefn()
617 acquirefn()
619 return l
618 return l
620
619
621 def lock(self, wait=1):
620 def lock(self, wait=1):
622 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
621 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
623 desc=_('repository %s') % self.origroot)
622 desc=_('repository %s') % self.origroot)
624
623
625 def wlock(self, wait=1):
624 def wlock(self, wait=1):
626 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
625 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
627 self.wreload,
626 self.wreload,
628 desc=_('working directory of %s') % self.origroot)
627 desc=_('working directory of %s') % self.origroot)
629
628
630 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
629 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
631 """
630 """
632 commit an individual file as part of a larger transaction
631 commit an individual file as part of a larger transaction
633 """
632 """
634
633
635 t = self.wread(fn)
634 t = self.wread(fn)
636 fl = self.file(fn)
635 fl = self.file(fn)
637 fp1 = manifest1.get(fn, nullid)
636 fp1 = manifest1.get(fn, nullid)
638 fp2 = manifest2.get(fn, nullid)
637 fp2 = manifest2.get(fn, nullid)
639
638
640 meta = {}
639 meta = {}
641 cp = self.dirstate.copied(fn)
640 cp = self.dirstate.copied(fn)
642 if cp:
641 if cp:
643 # Mark the new revision of this file as a copy of another
642 # Mark the new revision of this file as a copy of another
644 # file. This copy data will effectively act as a parent
643 # file. This copy data will effectively act as a parent
645 # of this new revision. If this is a merge, the first
644 # of this new revision. If this is a merge, the first
646 # parent will be the nullid (meaning "look up the copy data")
645 # parent will be the nullid (meaning "look up the copy data")
647 # and the second one will be the other parent. For example:
646 # and the second one will be the other parent. For example:
648 #
647 #
649 # 0 --- 1 --- 3 rev1 changes file foo
648 # 0 --- 1 --- 3 rev1 changes file foo
650 # \ / rev2 renames foo to bar and changes it
649 # \ / rev2 renames foo to bar and changes it
651 # \- 2 -/ rev3 should have bar with all changes and
650 # \- 2 -/ rev3 should have bar with all changes and
652 # should record that bar descends from
651 # should record that bar descends from
653 # bar in rev2 and foo in rev1
652 # bar in rev2 and foo in rev1
654 #
653 #
655 # this allows this merge to succeed:
654 # this allows this merge to succeed:
656 #
655 #
657 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
656 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
658 # \ / merging rev3 and rev4 should use bar@rev2
657 # \ / merging rev3 and rev4 should use bar@rev2
659 # \- 2 --- 4 as the merge base
658 # \- 2 --- 4 as the merge base
660 #
659 #
661 meta["copy"] = cp
660 meta["copy"] = cp
662 if not manifest2: # not a branch merge
661 if not manifest2: # not a branch merge
663 meta["copyrev"] = hex(manifest1.get(cp, nullid))
662 meta["copyrev"] = hex(manifest1.get(cp, nullid))
664 fp2 = nullid
663 fp2 = nullid
665 elif fp2 != nullid: # copied on remote side
664 elif fp2 != nullid: # copied on remote side
666 meta["copyrev"] = hex(manifest1.get(cp, nullid))
665 meta["copyrev"] = hex(manifest1.get(cp, nullid))
667 elif fp1 != nullid: # copied on local side, reversed
666 elif fp1 != nullid: # copied on local side, reversed
668 meta["copyrev"] = hex(manifest2.get(cp))
667 meta["copyrev"] = hex(manifest2.get(cp))
669 fp2 = fp1
668 fp2 = fp1
670 else: # directory rename
669 else: # directory rename
671 meta["copyrev"] = hex(manifest1.get(cp, nullid))
670 meta["copyrev"] = hex(manifest1.get(cp, nullid))
672 self.ui.debug(_(" %s: copy %s:%s\n") %
671 self.ui.debug(_(" %s: copy %s:%s\n") %
673 (fn, cp, meta["copyrev"]))
672 (fn, cp, meta["copyrev"]))
674 fp1 = nullid
673 fp1 = nullid
675 elif fp2 != nullid:
674 elif fp2 != nullid:
676 # is one parent an ancestor of the other?
675 # is one parent an ancestor of the other?
677 fpa = fl.ancestor(fp1, fp2)
676 fpa = fl.ancestor(fp1, fp2)
678 if fpa == fp1:
677 if fpa == fp1:
679 fp1, fp2 = fp2, nullid
678 fp1, fp2 = fp2, nullid
680 elif fpa == fp2:
679 elif fpa == fp2:
681 fp2 = nullid
680 fp2 = nullid
682
681
683 # is the file unmodified from the parent? report existing entry
682 # is the file unmodified from the parent? report existing entry
684 if fp2 == nullid and not fl.cmp(fp1, t):
683 if fp2 == nullid and not fl.cmp(fp1, t):
685 return fp1
684 return fp1
686
685
687 changelist.append(fn)
686 changelist.append(fn)
688 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
687 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
689
688
690 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
689 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
691 if p1 is None:
690 if p1 is None:
692 p1, p2 = self.dirstate.parents()
691 p1, p2 = self.dirstate.parents()
693 return self.commit(files=files, text=text, user=user, date=date,
692 return self.commit(files=files, text=text, user=user, date=date,
694 p1=p1, p2=p2, wlock=wlock, extra=extra)
693 p1=p1, p2=p2, wlock=wlock, extra=extra)
695
694
696 def commit(self, files=None, text="", user=None, date=None,
695 def commit(self, files=None, text="", user=None, date=None,
697 match=util.always, force=False, lock=None, wlock=None,
696 match=util.always, force=False, lock=None, wlock=None,
698 force_editor=False, p1=None, p2=None, extra={}):
697 force_editor=False, p1=None, p2=None, extra={}):
699
698
700 commit = []
699 commit = []
701 remove = []
700 remove = []
702 changed = []
701 changed = []
703 use_dirstate = (p1 is None) # not rawcommit
702 use_dirstate = (p1 is None) # not rawcommit
704 extra = extra.copy()
703 extra = extra.copy()
705
704
706 if use_dirstate:
705 if use_dirstate:
707 if files:
706 if files:
708 for f in files:
707 for f in files:
709 s = self.dirstate.state(f)
708 s = self.dirstate.state(f)
710 if s in 'nmai':
709 if s in 'nmai':
711 commit.append(f)
710 commit.append(f)
712 elif s == 'r':
711 elif s == 'r':
713 remove.append(f)
712 remove.append(f)
714 else:
713 else:
715 self.ui.warn(_("%s not tracked!\n") % f)
714 self.ui.warn(_("%s not tracked!\n") % f)
716 else:
715 else:
717 changes = self.status(match=match)[:5]
716 changes = self.status(match=match)[:5]
718 modified, added, removed, deleted, unknown = changes
717 modified, added, removed, deleted, unknown = changes
719 commit = modified + added
718 commit = modified + added
720 remove = removed
719 remove = removed
721 else:
720 else:
722 commit = files
721 commit = files
723
722
724 if use_dirstate:
723 if use_dirstate:
725 p1, p2 = self.dirstate.parents()
724 p1, p2 = self.dirstate.parents()
726 update_dirstate = True
725 update_dirstate = True
727 else:
726 else:
728 p1, p2 = p1, p2 or nullid
727 p1, p2 = p1, p2 or nullid
729 update_dirstate = (self.dirstate.parents()[0] == p1)
728 update_dirstate = (self.dirstate.parents()[0] == p1)
730
729
731 c1 = self.changelog.read(p1)
730 c1 = self.changelog.read(p1)
732 c2 = self.changelog.read(p2)
731 c2 = self.changelog.read(p2)
733 m1 = self.manifest.read(c1[0]).copy()
732 m1 = self.manifest.read(c1[0]).copy()
734 m2 = self.manifest.read(c2[0])
733 m2 = self.manifest.read(c2[0])
735
734
736 if use_dirstate:
735 if use_dirstate:
737 branchname = self.workingctx().branch()
736 branchname = self.workingctx().branch()
738 try:
737 try:
739 branchname = branchname.decode('UTF-8').encode('UTF-8')
738 branchname = branchname.decode('UTF-8').encode('UTF-8')
740 except UnicodeDecodeError:
739 except UnicodeDecodeError:
741 raise util.Abort(_('branch name not in UTF-8!'))
740 raise util.Abort(_('branch name not in UTF-8!'))
742 else:
741 else:
743 branchname = ""
742 branchname = ""
744
743
745 if use_dirstate:
744 if use_dirstate:
746 oldname = c1[5].get("branch", "") # stored in UTF-8
745 oldname = c1[5].get("branch", "") # stored in UTF-8
747 if not commit and not remove and not force and p2 == nullid and \
746 if not commit and not remove and not force and p2 == nullid and \
748 branchname == oldname:
747 branchname == oldname:
749 self.ui.status(_("nothing changed\n"))
748 self.ui.status(_("nothing changed\n"))
750 return None
749 return None
751
750
752 xp1 = hex(p1)
751 xp1 = hex(p1)
753 if p2 == nullid: xp2 = ''
752 if p2 == nullid: xp2 = ''
754 else: xp2 = hex(p2)
753 else: xp2 = hex(p2)
755
754
756 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
755 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
757
756
758 if not wlock:
757 if not wlock:
759 wlock = self.wlock()
758 wlock = self.wlock()
760 if not lock:
759 if not lock:
761 lock = self.lock()
760 lock = self.lock()
762 tr = self.transaction()
761 tr = self.transaction()
763
762
764 # check in files
763 # check in files
765 new = {}
764 new = {}
766 linkrev = self.changelog.count()
765 linkrev = self.changelog.count()
767 commit.sort()
766 commit.sort()
768 is_exec = util.execfunc(self.root, m1.execf)
767 is_exec = util.execfunc(self.root, m1.execf)
769 is_link = util.linkfunc(self.root, m1.linkf)
768 is_link = util.linkfunc(self.root, m1.linkf)
770 for f in commit:
769 for f in commit:
771 self.ui.note(f + "\n")
770 self.ui.note(f + "\n")
772 try:
771 try:
773 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
772 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
774 m1.set(f, is_exec(f), is_link(f))
773 m1.set(f, is_exec(f), is_link(f))
775 except (OSError, IOError):
774 except (OSError, IOError):
776 if use_dirstate:
775 if use_dirstate:
777 self.ui.warn(_("trouble committing %s!\n") % f)
776 self.ui.warn(_("trouble committing %s!\n") % f)
778 raise
777 raise
779 else:
778 else:
780 remove.append(f)
779 remove.append(f)
781
780
782 # update manifest
781 # update manifest
783 m1.update(new)
782 m1.update(new)
784 remove.sort()
783 remove.sort()
785 removed = []
784 removed = []
786
785
787 for f in remove:
786 for f in remove:
788 if f in m1:
787 if f in m1:
789 del m1[f]
788 del m1[f]
790 removed.append(f)
789 removed.append(f)
791 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
790 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
792
791
793 # add changeset
792 # add changeset
794 new = new.keys()
793 new = new.keys()
795 new.sort()
794 new.sort()
796
795
797 user = user or self.ui.username()
796 user = user or self.ui.username()
798 if not text or force_editor:
797 if not text or force_editor:
799 edittext = []
798 edittext = []
800 if text:
799 if text:
801 edittext.append(text)
800 edittext.append(text)
802 edittext.append("")
801 edittext.append("")
803 edittext.append("HG: user: %s" % user)
802 edittext.append("HG: user: %s" % user)
804 if p2 != nullid:
803 if p2 != nullid:
805 edittext.append("HG: branch merge")
804 edittext.append("HG: branch merge")
806 if branchname:
805 if branchname:
807 edittext.append("HG: branch %s" % util.tolocal(branchname))
806 edittext.append("HG: branch %s" % util.tolocal(branchname))
808 edittext.extend(["HG: changed %s" % f for f in changed])
807 edittext.extend(["HG: changed %s" % f for f in changed])
809 edittext.extend(["HG: removed %s" % f for f in removed])
808 edittext.extend(["HG: removed %s" % f for f in removed])
810 if not changed and not remove:
809 if not changed and not remove:
811 edittext.append("HG: no files changed")
810 edittext.append("HG: no files changed")
812 edittext.append("")
811 edittext.append("")
813 # run editor in the repository root
812 # run editor in the repository root
814 olddir = os.getcwd()
813 olddir = os.getcwd()
815 os.chdir(self.root)
814 os.chdir(self.root)
816 text = self.ui.edit("\n".join(edittext), user)
815 text = self.ui.edit("\n".join(edittext), user)
817 os.chdir(olddir)
816 os.chdir(olddir)
818
817
819 lines = [line.rstrip() for line in text.rstrip().splitlines()]
818 lines = [line.rstrip() for line in text.rstrip().splitlines()]
820 while lines and not lines[0]:
819 while lines and not lines[0]:
821 del lines[0]
820 del lines[0]
822 if not lines:
821 if not lines:
823 return None
822 return None
824 text = '\n'.join(lines)
823 text = '\n'.join(lines)
825 if branchname:
824 if branchname:
826 extra["branch"] = branchname
825 extra["branch"] = branchname
827 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
826 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
828 user, date, extra)
827 user, date, extra)
829 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
828 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
830 parent2=xp2)
829 parent2=xp2)
831 tr.close()
830 tr.close()
832
831
833 if self.branchcache and "branch" in extra:
832 if self.branchcache and "branch" in extra:
834 self.branchcache[util.tolocal(extra["branch"])] = n
833 self.branchcache[util.tolocal(extra["branch"])] = n
835
834
836 if use_dirstate or update_dirstate:
835 if use_dirstate or update_dirstate:
837 self.dirstate.setparents(n)
836 self.dirstate.setparents(n)
838 if use_dirstate:
837 if use_dirstate:
839 self.dirstate.update(new, "n")
838 self.dirstate.update(new, "n")
840 self.dirstate.forget(removed)
839 self.dirstate.forget(removed)
841
840
842 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
841 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
843 return n
842 return n
844
843
845 def walk(self, node=None, files=[], match=util.always, badmatch=None):
844 def walk(self, node=None, files=[], match=util.always, badmatch=None):
846 '''
845 '''
847 walk recursively through the directory tree or a given
846 walk recursively through the directory tree or a given
848 changeset, finding all files matched by the match
847 changeset, finding all files matched by the match
849 function
848 function
850
849
851 results are yielded in a tuple (src, filename), where src
850 results are yielded in a tuple (src, filename), where src
852 is one of:
851 is one of:
853 'f' the file was found in the directory tree
852 'f' the file was found in the directory tree
854 'm' the file was only in the dirstate and not in the tree
853 'm' the file was only in the dirstate and not in the tree
855 'b' file was not found and matched badmatch
854 'b' file was not found and matched badmatch
856 '''
855 '''
857
856
858 if node:
857 if node:
859 fdict = dict.fromkeys(files)
858 fdict = dict.fromkeys(files)
860 for fn in self.manifest.read(self.changelog.read(node)[0]):
859 for fn in self.manifest.read(self.changelog.read(node)[0]):
861 for ffn in fdict:
860 for ffn in fdict:
862 # match if the file is the exact name or a directory
861 # match if the file is the exact name or a directory
863 if ffn == fn or fn.startswith("%s/" % ffn):
862 if ffn == fn or fn.startswith("%s/" % ffn):
864 del fdict[ffn]
863 del fdict[ffn]
865 break
864 break
866 if match(fn):
865 if match(fn):
867 yield 'm', fn
866 yield 'm', fn
868 for fn in fdict:
867 for fn in fdict:
869 if badmatch and badmatch(fn):
868 if badmatch and badmatch(fn):
870 if match(fn):
869 if match(fn):
871 yield 'b', fn
870 yield 'b', fn
872 else:
871 else:
873 self.ui.warn(_('%s: No such file in rev %s\n') % (
872 self.ui.warn(_('%s: No such file in rev %s\n') % (
874 util.pathto(self.getcwd(), fn), short(node)))
873 util.pathto(self.getcwd(), fn), short(node)))
875 else:
874 else:
876 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
875 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
877 yield src, fn
876 yield src, fn
878
877
879 def status(self, node1=None, node2=None, files=[], match=util.always,
878 def status(self, node1=None, node2=None, files=[], match=util.always,
880 wlock=None, list_ignored=False, list_clean=False):
879 wlock=None, list_ignored=False, list_clean=False):
881 """return status of files between two nodes or node and working directory
880 """return status of files between two nodes or node and working directory
882
881
883 If node1 is None, use the first dirstate parent instead.
882 If node1 is None, use the first dirstate parent instead.
884 If node2 is None, compare node1 with working directory.
883 If node2 is None, compare node1 with working directory.
885 """
884 """
886
885
887 def fcmp(fn, getnode):
886 def fcmp(fn, getnode):
888 t1 = self.wread(fn)
887 t1 = self.wread(fn)
889 return self.file(fn).cmp(getnode(fn), t1)
888 return self.file(fn).cmp(getnode(fn), t1)
890
889
891 def mfmatches(node):
890 def mfmatches(node):
892 change = self.changelog.read(node)
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

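    # A hedged usage sketch of the status tuple above (hypothetical caller
    # code, not part of this class; 'repo' is an open localrepository and
    # the list_clean flag is assumed to be a status() parameter, as the
    # dirstate call above suggests):
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
    #   for f in modified:
    #       print "M %s" % f
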
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            islink = os.path.islink(p)
            if not islink and not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not islink and not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn(_("%s not removed!\n") % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

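    # The negated-rev trick above makes an ascending tuple sort yield
    # newest-first heads; an illustrative standalone sketch with made-up
    # revision numbers:
    #
    #   pairs = [(-r, n) for (r, n) in [(5, 'a'), (9, 'b'), (2, 'c')]]
    #   pairs.sort()              # [(-9, 'b'), (-5, 'a'), (-2, 'c')]
    #   [n for (r, n) in pairs]   # ['b', 'a', 'c'] -- highest rev first
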
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, walk first parents from top towards
        # bottom and collect the nodes at distances 1, 2, 4, 8, ... from
        # top -- the exponential sampling used by the discovery bisection
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

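    # Sketch of the sampling performed above (assuming a linear run of
    # first parents from top down to bottom):
    #
    #   repo.between([(top, bottom)])
    #   # -> [[node@dist1, node@dist2, node@dist4, ...]]
    #
    # The log-spaced samples are what let findincoming below narrow a
    # remote branch with a logarithmic number of round trips.
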
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

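    # Hypothetical usage sketch (names are illustrative; 'remote' is any
    # peer repository object exposing heads()/branches()/between()):
    #
    #   base = {}
    #   fetch = repo.findincoming(remote, base=base)
    #   # fetch: roots of the changeset subsets missing locally
    #   # base:  updated in place with nodes common to both sides
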
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

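    # Hypothetical usage sketch: without heads, only the roots to push are
    # returned; with heads, a (roots, updated_heads) pair comes back:
    #
    #   roots = repo.findoutgoing(remote)
    #   roots, updated = repo.findoutgoing(remote, heads=remote.heads())
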
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

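    # Illustrative call (hedged; the return value is addchangegroup's head
    # encoding, so 0 means nothing new was pulled):
    #
    #   if repo.pull(remote) == 0:
    #       repo.ui.status("nothing to do\n")
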
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

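    # prepush returns (changegroup, remote_heads) when there is something
    # to send, or (None, status) when there is not; both push paths below
    # unpack it the same way, e.g.:
    #
    #   ret = repo.prepush(remote, force=False, revs=None)
    #   if ret[0] is None:
    #       return ret[1]          # 1 == nothing to push / push refused
    #   cg, remote_heads = ret
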
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

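        # A hypothetical call sketch (bases and heads are changelog node
        # ids; the result is fed to the receiving side's addchangegroup):
        #
        #   cg = repo.changegroupsubset(bases, heads, 'push')
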
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function-generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents
            # the most efficient order to read the nodes in, and represents
            # a topological sorting of the nodes, this function is often
            # useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

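        # Python 2's list.sort accepts such a comparator directly; a small
        # sketch against the manifest revlog bound above ('nodelist' is a
        # hypothetical list of manifest node ids):
        #
        #   nodelist.sort(cmp_by_rev_func(mnfst))   # ascending disk order
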
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we
        # can also assume the recipient will have all the parents.  This
        # function prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function-generating function used to set up an
        # environment for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.
            # It does this by assuming a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our
            # functions back to lookup the owning changenode and collect
            # information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

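    # The split between the two generators: changegroupsubset() computes
    # exactly which nodes the recipient is missing, while changegroup()
    # assumes everything descending from basenodes is missing. A hedged
    # sketch of a full transfer:
    #
    #   cg = repo.changegroup([nullid], 'push')   # everything we have
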
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
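        # The encoding above never returns 0 for a successful add; a
        # hypothetical caller can recover the head delta like so:
        #
        #   ret = repo.addchangegroup(source, 'pull', url)
        #   if ret > 1:   added   = ret - 1     # new heads appeared
        #   elif ret < 0: removed = -1 - ret    # heads went away
        #   # ret == 1: head count unchanged; ret == 0: nothing changed
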
1756 def csmap(x):
1755 def csmap(x):
1757 self.ui.debug(_("add changeset %s\n") % short(x))
1756 self.ui.debug(_("add changeset %s\n") % short(x))
1758 return cl.count()
1757 return cl.count()
1759
1758
1760 def revmap(x):
1759 def revmap(x):
1761 return cl.rev(x)
1760 return cl.rev(x)
1762
1761
1763 if not source:
1762 if not source:
1764 return 0
1763 return 0
1765
1764
1766 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1765 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1767
1766
1768 changesets = files = revisions = 0
1767 changesets = files = revisions = 0
1769
1768
1770 tr = self.transaction()
1769 tr = self.transaction()
1771
1770
1772 # write changelog data to temp files so concurrent readers will not see
1771 # write changelog data to temp files so concurrent readers will not see
1773 # inconsistent view
1772 # inconsistent view
1774 cl = None
1773 cl = None
1775 try:
1774 try:
1776 cl = appendfile.appendchangelog(self.sopener,
1775 cl = appendfile.appendchangelog(self.sopener,
1777 self.changelog.version)
1776 self.changelog.version)
1778
1777
1779 oldheads = len(cl.heads())
1778 oldheads = len(cl.heads())
1780
1779
1781 # pull off the changeset group
1780 # pull off the changeset group
1782 self.ui.status(_("adding changesets\n"))
1781 self.ui.status(_("adding changesets\n"))
1783 cor = cl.count() - 1
1782 cor = cl.count() - 1
1784 chunkiter = changegroup.chunkiter(source)
1783 chunkiter = changegroup.chunkiter(source)
1785 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1784 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1786 raise util.Abort(_("received changelog group is empty"))
1785 raise util.Abort(_("received changelog group is empty"))
1787 cnr = cl.count() - 1
1786 cnr = cl.count() - 1
1788 changesets = cnr - cor
1787 changesets = cnr - cor
1789
1788
1790 # pull off the manifest group
1789 # pull off the manifest group
1791 self.ui.status(_("adding manifests\n"))
1790 self.ui.status(_("adding manifests\n"))
1792 chunkiter = changegroup.chunkiter(source)
1791 chunkiter = changegroup.chunkiter(source)
1793 # no need to check for empty manifest group here:
1792 # no need to check for empty manifest group here:
1794 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1793 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1795 # no new manifest will be created and the manifest group will
1794 # no new manifest will be created and the manifest group will
1796 # be empty during the pull
1795 # be empty during the pull
1797 self.manifest.addgroup(chunkiter, revmap, tr)
1796 self.manifest.addgroup(chunkiter, revmap, tr)
1798
1797
1799 # process the files
1798 # process the files
1800 self.ui.status(_("adding file changes\n"))
1799 self.ui.status(_("adding file changes\n"))
1801 while 1:
1800 while 1:
1802 f = changegroup.getchunk(source)
1801 f = changegroup.getchunk(source)
1803 if not f:
1802 if not f:
1804 break
1803 break
1805 self.ui.debug(_("adding %s revisions\n") % f)
1804 self.ui.debug(_("adding %s revisions\n") % f)
1806 fl = self.file(f)
1805 fl = self.file(f)
1807 o = fl.count()
1806 o = fl.count()
1808 chunkiter = changegroup.chunkiter(source)
1807 chunkiter = changegroup.chunkiter(source)
1809 if fl.addgroup(chunkiter, revmap, tr) is None:
1808 if fl.addgroup(chunkiter, revmap, tr) is None:
1810 raise util.Abort(_("received file revlog group is empty"))
1809 raise util.Abort(_("received file revlog group is empty"))
1811 revisions += fl.count() - o
1810 revisions += fl.count() - o
1812 files += 1
1811 files += 1
1813
1812
1814 cl.writedata()
1813 cl.writedata()
1815 finally:
1814 finally:
1816 if cl:
1815 if cl:
1817 cl.cleanup()
1816 cl.cleanup()
1818
1817
1819 # make changelog see real files again
1818 # make changelog see real files again
1820 self.changelog = changelog.changelog(self.sopener,
1819 self.changelog = changelog.changelog(self.sopener,
1821 self.changelog.version)
1820 self.changelog.version)
1822 self.changelog.checkinlinesize(tr)
1821 self.changelog.checkinlinesize(tr)
1823
1822
1824 newheads = len(self.changelog.heads())
1823 newheads = len(self.changelog.heads())
1825 heads = ""
1824 heads = ""
1826 if oldheads and newheads != oldheads:
1825 if oldheads and newheads != oldheads:
1827 heads = _(" (%+d heads)") % (newheads - oldheads)
1826 heads = _(" (%+d heads)") % (newheads - oldheads)
1828
1827
1829 self.ui.status(_("added %d changesets"
1828 self.ui.status(_("added %d changesets"
1830 " with %d changes to %d files%s\n")
1829 " with %d changes to %d files%s\n")
1831 % (changesets, revisions, files, heads))
1830 % (changesets, revisions, files, heads))
1832
1831
1833 if changesets > 0:
1832 if changesets > 0:
1834 self.hook('pretxnchangegroup', throw=True,
1833 self.hook('pretxnchangegroup', throw=True,
1835 node=hex(self.changelog.node(cor+1)), source=srctype,
1834 node=hex(self.changelog.node(cor+1)), source=srctype,
1836 url=url)
1835 url=url)
1837
1836
1838 tr.close()
1837 tr.close()
1839
1838
1840 if changesets > 0:
1839 if changesets > 0:
1841 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1840 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1842 source=srctype, url=url)
1841 source=srctype, url=url)
1843
1842
1844 for i in xrange(cor + 1, cnr + 1):
1843 for i in xrange(cor + 1, cnr + 1):
1845 self.hook("incoming", node=hex(self.changelog.node(i)),
1844 self.hook("incoming", node=hex(self.changelog.node(i)),
1846 source=srctype, url=url)
1845 source=srctype, url=url)
1847
1846
1848 # never return 0 here:
1847 # never return 0 here:
1849 if newheads < oldheads:
1848 if newheads < oldheads:
1850 return newheads - oldheads - 1
1849 return newheads - oldheads - 1
1851 else:
1850 else:
1852 return newheads - oldheads + 1
1851 return newheads - oldheads + 1
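        # (callers can thus tell "nothing added" (the early return 0 above)
        # from "changesets added with the head count unchanged", which
        # yields 1)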

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
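For reference, the wire format that stream_in() above consumes is: one status
line ("0" ok, "1" forbidden, "2" lock failed), one line with
"<total_files> <total_bytes>", then for each file a header line
"<name>\0<size>" followed by exactly <size> raw bytes. A minimal standalone
sketch of a parser for that stream (the helper name and the fake payload are
illustrative only, not Mercurial APIs):

import StringIO

def parse_stream(fp):
    # status line: 0 = ok, 1 = forbidden, 2 = locking failed
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('server refused stream (code %d)' % resp)
    # one line with "<total_files> <total_bytes>"
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        # per-file header "<name>\0<size>\n", then exactly <size> raw bytes
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))

# a fake stream carrying a single two-byte file
fp = StringIO.StringIO('0\n1 2\ndata/foo.i\x002\nhi')
for name, data in parse_stream(fp):
    print name, len(data)   # -> data/foo.i 2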
@@ -1,61 +1,81 @@
 #!/bin/sh
 
 # This test tries to exercise the ssh functionality with a dummy script
 
 cat <<'EOF' > dummyssh
 #!/bin/sh
 # this attempts to deal with relative pathnames
 cd `dirname $0`
 
 # check for proper args
 if [ $1 != "user@dummy" ] ; then
     exit -1
 fi
 
 # check that we're in the right directory
 if [ ! -x dummyssh ] ; then
     exit -1
 fi
 
 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
 $2
 EOF
 chmod +x dummyssh
 
+checknewrepo()
+{
+    name=$1
+
+    if [ -d $name/.hg/store ]; then
+        echo store created
+    fi
+
+    if [ -f $name/.hg/00changelog.i ]; then
+        echo 00changelog.i created
+    fi
+
+    cat $name/.hg/requires
+}
+
 echo "# creating 'local'"
 hg init local
+checknewrepo local
 echo this > local/foo
 hg ci --cwd local -A -m "init" -d "1000000 0"
 
+echo "# creating repo with old format"
+hg --config format.usestore=false init old
+checknewrepo old
+
 echo "#test failure"
 hg init local
 
 echo "# init+push to remote2"
 hg init -e ./dummyssh ssh://user@dummy/remote2
 hg incoming -R remote2 local
 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
 
 echo "# clone to remote1"
 hg clone -e ./dummyssh local ssh://user@dummy/remote1
 
 echo "# init to existing repo"
 hg init -e ./dummyssh ssh://user@dummy/remote1
 
 echo "# clone to existing repo"
 hg clone -e ./dummyssh local ssh://user@dummy/remote1
 
 echo "# output of dummyssh"
 cat dummylog
 
 echo "# comparing repositories"
 hg tip -q -R local
 hg tip -q -R remote1
 hg tip -q -R remote2
 
 echo "# check names for repositories (clashes with URL schemes, special chars)"
 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
     echo "# hg init \"$i\""
     hg init "$i"
     test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
 done
 
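For anyone reproducing these layout checks outside the shell harness, the
helper translates directly to Python; a sketch assuming it runs from the
directory containing the repositories (not part of the test suite itself):

import os

def checknewrepo(name):
    # same probes as the shell helper above
    if os.path.isdir(os.path.join(name, '.hg', 'store')):
        print 'store created'
    if os.path.isfile(os.path.join(name, '.hg', '00changelog.i')):
        print '00changelog.i created'
    for line in open(os.path.join(name, '.hg', 'requires')):
        print line.rstrip()

checknewrepo('local')   # expects: store created, 00changelog.i created,
                        # revlogv1, store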
@@ -1,63 +1,69 @@
 # creating 'local'
+store created
+00changelog.i created
+revlogv1
+store
 adding foo
+# creating repo with old format
+revlogv1
 #test failure
 abort: repository local already exists!
 # init+push to remote2
 comparing with local
 changeset:   0:c4e059d443be
 tag:         tip
 user:        test
 date:        Mon Jan 12 13:46:40 1970 +0000
 summary:     init
 
 pushing to ssh://user@dummy/remote2
 searching for changes
 remote: adding changesets
 remote: adding manifests
 remote: adding file changes
 remote: added 1 changesets with 1 changes to 1 files
 # clone to remote1
 searching for changes
 remote: adding changesets
 remote: adding manifests
 remote: adding file changes
 remote: added 1 changesets with 1 changes to 1 files
 # init to existing repo
 abort: repository remote1 already exists!
 abort: could not create remote repo!
 # clone to existing repo
 abort: repository remote1 already exists!
 abort: could not create remote repo!
 # output of dummyssh
 Got arguments 1:user@dummy 2:hg init remote2 3: 4: 5:
 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
 # comparing repositories
 0:c4e059d443be
 0:c4e059d443be
 0:c4e059d443be
 # check names for repositories (clashes with URL schemes, special chars)
 # hg init "bundle"
 ok
 # hg init "file"
 ok
 # hg init "hg"
 ok
 # hg init "http"
 ok
 # hg init "https"
 ok
 # hg init "old-http"
 ok
 # hg init "ssh"
 ok
 # hg init "static-http"
 ok
 # hg init " "
 ok
 # hg init "with space"
 ok