##// END OF EJS Templates
localrepo.walk: if we're walking a specific revision, sort the files...
Alexis S. L. Carvalho -
r4194:8e947b0e default
parent child Browse files
Show More
@@ -1,2008 +1,2013 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    """A repository backed by a local ".hg" directory on disk."""

    # wire-protocol capabilities advertised by this repository
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk format requirements this code knows how to read
    supported = ('revlogv1', 'store')
    # feature tags used to validate the branches.cache file format
    branchcache_features = ('unnamed',)
21
21
22 def __del__(self):
22 def __del__(self):
23 self.transhandle = None
23 self.transhandle = None
24 def __init__(self, parentui, path=None, create=0):
24 def __init__(self, parentui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 if not path:
26 if not path:
27 p = os.getcwd()
27 p = os.getcwd()
28 while not os.path.isdir(os.path.join(p, ".hg")):
28 while not os.path.isdir(os.path.join(p, ".hg")):
29 oldp = p
29 oldp = p
30 p = os.path.dirname(p)
30 p = os.path.dirname(p)
31 if p == oldp:
31 if p == oldp:
32 raise repo.RepoError(_("There is no Mercurial repository"
32 raise repo.RepoError(_("There is no Mercurial repository"
33 " here (.hg not found)"))
33 " here (.hg not found)"))
34 path = p
34 path = p
35
35
36 self.root = os.path.realpath(path)
36 self.root = os.path.realpath(path)
37 self.path = os.path.join(self.root, ".hg")
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
38 self.origroot = path
39 self.opener = util.opener(self.path)
39 self.opener = util.opener(self.path)
40 self.wopener = util.opener(self.root)
40 self.wopener = util.opener(self.root)
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 os.mkdir(path)
45 os.mkdir(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 os.mkdir(os.path.join(self.path, "store"))
47 os.mkdir(os.path.join(self.path, "store"))
48 requirements = ("revlogv1", "store")
48 requirements = ("revlogv1", "store")
49 reqfile = self.opener("requires", "w")
49 reqfile = self.opener("requires", "w")
50 for r in requirements:
50 for r in requirements:
51 reqfile.write("%s\n" % r)
51 reqfile.write("%s\n" % r)
52 reqfile.close()
52 reqfile.close()
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 else:
58 else:
59 raise repo.RepoError(_("repository %s not found") % path)
59 raise repo.RepoError(_("repository %s not found") % path)
60 elif create:
60 elif create:
61 raise repo.RepoError(_("repository %s already exists") % path)
61 raise repo.RepoError(_("repository %s already exists") % path)
62 else:
62 else:
63 # find requirements
63 # find requirements
64 try:
64 try:
65 requirements = self.opener("requires").read().splitlines()
65 requirements = self.opener("requires").read().splitlines()
66 except IOError, inst:
66 except IOError, inst:
67 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
68 raise
68 raise
69 requirements = []
69 requirements = []
70 # check them
70 # check them
71 for r in requirements:
71 for r in requirements:
72 if r not in self.supported:
72 if r not in self.supported:
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
74
74
75 # setup store
75 # setup store
76 if "store" in requirements:
76 if "store" in requirements:
77 self.encodefn = util.encodefilename
77 self.encodefn = util.encodefilename
78 self.decodefn = util.decodefilename
78 self.decodefn = util.decodefilename
79 self.spath = os.path.join(self.path, "store")
79 self.spath = os.path.join(self.path, "store")
80 else:
80 else:
81 self.encodefn = lambda x: x
81 self.encodefn = lambda x: x
82 self.decodefn = lambda x: x
82 self.decodefn = lambda x: x
83 self.spath = self.path
83 self.spath = self.path
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
85
85
86 self.ui = ui.ui(parentui=parentui)
86 self.ui = ui.ui(parentui=parentui)
87 try:
87 try:
88 self.ui.readconfig(self.join("hgrc"), self.root)
88 self.ui.readconfig(self.join("hgrc"), self.root)
89 except IOError:
89 except IOError:
90 pass
90 pass
91
91
92 v = self.ui.configrevlog()
92 v = self.ui.configrevlog()
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
95 fl = v.get('flags', None)
95 fl = v.get('flags', None)
96 flags = 0
96 flags = 0
97 if fl != None:
97 if fl != None:
98 for x in fl.split():
98 for x in fl.split():
99 flags |= revlog.flagstr(x)
99 flags |= revlog.flagstr(x)
100 elif self.revlogv1:
100 elif self.revlogv1:
101 flags = revlog.REVLOG_DEFAULT_FLAGS
101 flags = revlog.REVLOG_DEFAULT_FLAGS
102
102
103 v = self.revlogversion | flags
103 v = self.revlogversion | flags
104 self.manifest = manifest.manifest(self.sopener, v)
104 self.manifest = manifest.manifest(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
106
106
107 fallback = self.ui.config('ui', 'fallbackencoding')
107 fallback = self.ui.config('ui', 'fallbackencoding')
108 if fallback:
108 if fallback:
109 util._fallbackencoding = fallback
109 util._fallbackencoding = fallback
110
110
111 # the changelog might not have the inline index flag
111 # the changelog might not have the inline index flag
112 # on. If the format of the changelog is the same as found in
112 # on. If the format of the changelog is the same as found in
113 # .hgrc, apply any flags found in the .hgrc as well.
113 # .hgrc, apply any flags found in the .hgrc as well.
114 # Otherwise, just version from the changelog
114 # Otherwise, just version from the changelog
115 v = self.changelog.version
115 v = self.changelog.version
116 if v == self.revlogversion:
116 if v == self.revlogversion:
117 v |= flags
117 v |= flags
118 self.revlogversion = v
118 self.revlogversion = v
119
119
120 self.tagscache = None
120 self.tagscache = None
121 self.branchcache = None
121 self.branchcache = None
122 self.nodetagscache = None
122 self.nodetagscache = None
123 self.encodepats = None
123 self.encodepats = None
124 self.decodepats = None
124 self.decodepats = None
125 self.transhandle = None
125 self.transhandle = None
126
126
127 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
128
128
129 def url(self):
129 def url(self):
130 return 'file:' + self.root
130 return 'file:' + self.root
131
131
132 def hook(self, name, throw=False, **args):
132 def hook(self, name, throw=False, **args):
133 def callhook(hname, funcname):
133 def callhook(hname, funcname):
134 '''call python hook. hook is callable object, looked up as
134 '''call python hook. hook is callable object, looked up as
135 name in python module. if callable returns "true", hook
135 name in python module. if callable returns "true", hook
136 fails, else passes. if hook raises exception, treated as
136 fails, else passes. if hook raises exception, treated as
137 hook failure. exception propagates if throw is "true".
137 hook failure. exception propagates if throw is "true".
138
138
139 reason for "true" meaning "hook failed" is so that
139 reason for "true" meaning "hook failed" is so that
140 unmodified commands (e.g. mercurial.commands.update) can
140 unmodified commands (e.g. mercurial.commands.update) can
141 be run as hooks without wrappers to convert return values.'''
141 be run as hooks without wrappers to convert return values.'''
142
142
143 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
144 d = funcname.rfind('.')
144 d = funcname.rfind('.')
145 if d == -1:
145 if d == -1:
146 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
147 % (hname, funcname))
147 % (hname, funcname))
148 modname = funcname[:d]
148 modname = funcname[:d]
149 try:
149 try:
150 obj = __import__(modname)
150 obj = __import__(modname)
151 except ImportError:
151 except ImportError:
152 try:
152 try:
153 # extensions are loaded with hgext_ prefix
153 # extensions are loaded with hgext_ prefix
154 obj = __import__("hgext_%s" % modname)
154 obj = __import__("hgext_%s" % modname)
155 except ImportError:
155 except ImportError:
156 raise util.Abort(_('%s hook is invalid '
156 raise util.Abort(_('%s hook is invalid '
157 '(import of "%s" failed)') %
157 '(import of "%s" failed)') %
158 (hname, modname))
158 (hname, modname))
159 try:
159 try:
160 for p in funcname.split('.')[1:]:
160 for p in funcname.split('.')[1:]:
161 obj = getattr(obj, p)
161 obj = getattr(obj, p)
162 except AttributeError, err:
162 except AttributeError, err:
163 raise util.Abort(_('%s hook is invalid '
163 raise util.Abort(_('%s hook is invalid '
164 '("%s" is not defined)') %
164 '("%s" is not defined)') %
165 (hname, funcname))
165 (hname, funcname))
166 if not callable(obj):
166 if not callable(obj):
167 raise util.Abort(_('%s hook is invalid '
167 raise util.Abort(_('%s hook is invalid '
168 '("%s" is not callable)') %
168 '("%s" is not callable)') %
169 (hname, funcname))
169 (hname, funcname))
170 try:
170 try:
171 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
172 except (KeyboardInterrupt, util.SignalInterrupt):
172 except (KeyboardInterrupt, util.SignalInterrupt):
173 raise
173 raise
174 except Exception, exc:
174 except Exception, exc:
175 if isinstance(exc, util.Abort):
175 if isinstance(exc, util.Abort):
176 self.ui.warn(_('error: %s hook failed: %s\n') %
176 self.ui.warn(_('error: %s hook failed: %s\n') %
177 (hname, exc.args[0]))
177 (hname, exc.args[0]))
178 else:
178 else:
179 self.ui.warn(_('error: %s hook raised an exception: '
179 self.ui.warn(_('error: %s hook raised an exception: '
180 '%s\n') % (hname, exc))
180 '%s\n') % (hname, exc))
181 if throw:
181 if throw:
182 raise
182 raise
183 self.ui.print_exc()
183 self.ui.print_exc()
184 return True
184 return True
185 if r:
185 if r:
186 if throw:
186 if throw:
187 raise util.Abort(_('%s hook failed') % hname)
187 raise util.Abort(_('%s hook failed') % hname)
188 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 self.ui.warn(_('warning: %s hook failed\n') % hname)
189 return r
189 return r
190
190
191 def runhook(name, cmd):
191 def runhook(name, cmd):
192 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
193 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
194 r = util.system(cmd, environ=env, cwd=self.root)
194 r = util.system(cmd, environ=env, cwd=self.root)
195 if r:
195 if r:
196 desc, r = util.explain_exit(r)
196 desc, r = util.explain_exit(r)
197 if throw:
197 if throw:
198 raise util.Abort(_('%s hook %s') % (name, desc))
198 raise util.Abort(_('%s hook %s') % (name, desc))
199 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
200 return r
200 return r
201
201
202 r = False
202 r = False
203 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
204 if hname.split(".", 1)[0] == name and cmd]
204 if hname.split(".", 1)[0] == name and cmd]
205 hooks.sort()
205 hooks.sort()
206 for hname, cmd in hooks:
206 for hname, cmd in hooks:
207 if cmd.startswith('python:'):
207 if cmd.startswith('python:'):
208 r = callhook(hname, cmd[7:].strip()) or r
208 r = callhook(hname, cmd[7:].strip()) or r
209 else:
209 else:
210 r = runhook(hname, cmd) or r
210 r = runhook(hname, cmd) or r
211 return r
211 return r
212
212
213 tag_disallowed = ':\r\n'
213 tag_disallowed = ':\r\n'
214
214
215 def tag(self, name, node, message, local, user, date):
215 def tag(self, name, node, message, local, user, date):
216 '''tag a revision with a symbolic name.
216 '''tag a revision with a symbolic name.
217
217
218 if local is True, the tag is stored in a per-repository file.
218 if local is True, the tag is stored in a per-repository file.
219 otherwise, it is stored in the .hgtags file, and a new
219 otherwise, it is stored in the .hgtags file, and a new
220 changeset is committed with the change.
220 changeset is committed with the change.
221
221
222 keyword arguments:
222 keyword arguments:
223
223
224 local: whether to store tag in non-version-controlled file
224 local: whether to store tag in non-version-controlled file
225 (default False)
225 (default False)
226
226
227 message: commit message to use if committing
227 message: commit message to use if committing
228
228
229 user: name of user to use if committing
229 user: name of user to use if committing
230
230
231 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
232
232
233 for c in self.tag_disallowed:
233 for c in self.tag_disallowed:
234 if c in name:
234 if c in name:
235 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 raise util.Abort(_('%r cannot be used in a tag name') % c)
236
236
237 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
238
238
239 if local:
239 if local:
240 # local tags are stored in the current charset
240 # local tags are stored in the current charset
241 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
242 self.hook('tag', node=hex(node), tag=name, local=local)
242 self.hook('tag', node=hex(node), tag=name, local=local)
243 return
243 return
244
244
245 for x in self.status()[:5]:
245 for x in self.status()[:5]:
246 if '.hgtags' in x:
246 if '.hgtags' in x:
247 raise util.Abort(_('working copy of .hgtags is changed '
247 raise util.Abort(_('working copy of .hgtags is changed '
248 '(please commit .hgtags manually)'))
248 '(please commit .hgtags manually)'))
249
249
250 # committed tags are stored in UTF-8
250 # committed tags are stored in UTF-8
251 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 line = '%s %s\n' % (hex(node), util.fromlocal(name))
252 self.wfile('.hgtags', 'ab').write(line)
252 self.wfile('.hgtags', 'ab').write(line)
253 if self.dirstate.state('.hgtags') == '?':
253 if self.dirstate.state('.hgtags') == '?':
254 self.add(['.hgtags'])
254 self.add(['.hgtags'])
255
255
256 self.commit(['.hgtags'], message, user, date)
256 self.commit(['.hgtags'], message, user, date)
257 self.hook('tag', node=hex(node), tag=name, local=local)
257 self.hook('tag', node=hex(node), tag=name, local=local)
258
258
259 def tags(self):
259 def tags(self):
260 '''return a mapping of tag to node'''
260 '''return a mapping of tag to node'''
261 if not self.tagscache:
261 if not self.tagscache:
262 self.tagscache = {}
262 self.tagscache = {}
263
263
264 def parsetag(line, context):
264 def parsetag(line, context):
265 if not line:
265 if not line:
266 return
266 return
267 s = l.split(" ", 1)
267 s = l.split(" ", 1)
268 if len(s) != 2:
268 if len(s) != 2:
269 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 self.ui.warn(_("%s: cannot parse entry\n") % context)
270 return
270 return
271 node, key = s
271 node, key = s
272 key = util.tolocal(key.strip()) # stored in UTF-8
272 key = util.tolocal(key.strip()) # stored in UTF-8
273 try:
273 try:
274 bin_n = bin(node)
274 bin_n = bin(node)
275 except TypeError:
275 except TypeError:
276 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 self.ui.warn(_("%s: node '%s' is not well formed\n") %
277 (context, node))
277 (context, node))
278 return
278 return
279 if bin_n not in self.changelog.nodemap:
279 if bin_n not in self.changelog.nodemap:
280 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
281 (context, key))
281 (context, key))
282 return
282 return
283 self.tagscache[key] = bin_n
283 self.tagscache[key] = bin_n
284
284
285 # read the tags file from each head, ending with the tip,
285 # read the tags file from each head, ending with the tip,
286 # and add each tag found to the map, with "newer" ones
286 # and add each tag found to the map, with "newer" ones
287 # taking precedence
287 # taking precedence
288 f = None
288 f = None
289 for rev, node, fnode in self._hgtagsnodes():
289 for rev, node, fnode in self._hgtagsnodes():
290 f = (f and f.filectx(fnode) or
290 f = (f and f.filectx(fnode) or
291 self.filectx('.hgtags', fileid=fnode))
291 self.filectx('.hgtags', fileid=fnode))
292 count = 0
292 count = 0
293 for l in f.data().splitlines():
293 for l in f.data().splitlines():
294 count += 1
294 count += 1
295 parsetag(l, _("%s, line %d") % (str(f), count))
295 parsetag(l, _("%s, line %d") % (str(f), count))
296
296
297 try:
297 try:
298 f = self.opener("localtags")
298 f = self.opener("localtags")
299 count = 0
299 count = 0
300 for l in f:
300 for l in f:
301 # localtags are stored in the local character set
301 # localtags are stored in the local character set
302 # while the internal tag table is stored in UTF-8
302 # while the internal tag table is stored in UTF-8
303 l = util.fromlocal(l)
303 l = util.fromlocal(l)
304 count += 1
304 count += 1
305 parsetag(l, _("localtags, line %d") % count)
305 parsetag(l, _("localtags, line %d") % count)
306 except IOError:
306 except IOError:
307 pass
307 pass
308
308
309 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
310
310
311 return self.tagscache
311 return self.tagscache
312
312
313 def _hgtagsnodes(self):
313 def _hgtagsnodes(self):
314 heads = self.heads()
314 heads = self.heads()
315 heads.reverse()
315 heads.reverse()
316 last = {}
316 last = {}
317 ret = []
317 ret = []
318 for node in heads:
318 for node in heads:
319 c = self.changectx(node)
319 c = self.changectx(node)
320 rev = c.rev()
320 rev = c.rev()
321 try:
321 try:
322 fnode = c.filenode('.hgtags')
322 fnode = c.filenode('.hgtags')
323 except repo.LookupError:
323 except repo.LookupError:
324 continue
324 continue
325 ret.append((rev, node, fnode))
325 ret.append((rev, node, fnode))
326 if fnode in last:
326 if fnode in last:
327 ret[last[fnode]] = None
327 ret[last[fnode]] = None
328 last[fnode] = len(ret) - 1
328 last[fnode] = len(ret) - 1
329 return [item for item in ret if item]
329 return [item for item in ret if item]
330
330
331 def tagslist(self):
331 def tagslist(self):
332 '''return a list of tags ordered by revision'''
332 '''return a list of tags ordered by revision'''
333 l = []
333 l = []
334 for t, n in self.tags().items():
334 for t, n in self.tags().items():
335 try:
335 try:
336 r = self.changelog.rev(n)
336 r = self.changelog.rev(n)
337 except:
337 except:
338 r = -2 # sort to the beginning of the list if unknown
338 r = -2 # sort to the beginning of the list if unknown
339 l.append((r, t, n))
339 l.append((r, t, n))
340 l.sort()
340 l.sort()
341 return [(t, n) for r, t, n in l]
341 return [(t, n) for r, t, n in l]
342
342
343 def nodetags(self, node):
343 def nodetags(self, node):
344 '''return the tags associated with a node'''
344 '''return the tags associated with a node'''
345 if not self.nodetagscache:
345 if not self.nodetagscache:
346 self.nodetagscache = {}
346 self.nodetagscache = {}
347 for t, n in self.tags().items():
347 for t, n in self.tags().items():
348 self.nodetagscache.setdefault(n, []).append(t)
348 self.nodetagscache.setdefault(n, []).append(t)
349 return self.nodetagscache.get(node, [])
349 return self.nodetagscache.get(node, [])
350
350
351 def _branchtags(self):
351 def _branchtags(self):
352 partial, last, lrev = self._readbranchcache()
352 partial, last, lrev = self._readbranchcache()
353
353
354 tiprev = self.changelog.count() - 1
354 tiprev = self.changelog.count() - 1
355 if lrev != tiprev:
355 if lrev != tiprev:
356 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 self._updatebranchcache(partial, lrev+1, tiprev+1)
357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
358
358
359 return partial
359 return partial
360
360
361 def branchtags(self):
361 def branchtags(self):
362 if self.branchcache is not None:
362 if self.branchcache is not None:
363 return self.branchcache
363 return self.branchcache
364
364
365 self.branchcache = {} # avoid recursion in changectx
365 self.branchcache = {} # avoid recursion in changectx
366 partial = self._branchtags()
366 partial = self._branchtags()
367
367
368 # the branch cache is stored on disk as UTF-8, but in the local
368 # the branch cache is stored on disk as UTF-8, but in the local
369 # charset internally
369 # charset internally
370 for k, v in partial.items():
370 for k, v in partial.items():
371 self.branchcache[util.tolocal(k)] = v
371 self.branchcache[util.tolocal(k)] = v
372 return self.branchcache
372 return self.branchcache
373
373
374 def _readbranchcache(self):
374 def _readbranchcache(self):
375 partial = {}
375 partial = {}
376 try:
376 try:
377 f = self.opener("branches.cache")
377 f = self.opener("branches.cache")
378 lines = f.read().split('\n')
378 lines = f.read().split('\n')
379 f.close()
379 f.close()
380 features = lines.pop(0).strip()
380 features = lines.pop(0).strip()
381 if not features.startswith('features: '):
381 if not features.startswith('features: '):
382 raise ValueError(_('branch cache: no features specified'))
382 raise ValueError(_('branch cache: no features specified'))
383 features = features.split(' ', 1)[1].split()
383 features = features.split(' ', 1)[1].split()
384 missing_features = []
384 missing_features = []
385 for feature in self.branchcache_features:
385 for feature in self.branchcache_features:
386 try:
386 try:
387 features.remove(feature)
387 features.remove(feature)
388 except ValueError, inst:
388 except ValueError, inst:
389 missing_features.append(feature)
389 missing_features.append(feature)
390 if missing_features:
390 if missing_features:
391 raise ValueError(_('branch cache: missing features: %s')
391 raise ValueError(_('branch cache: missing features: %s')
392 % ', '.join(missing_features))
392 % ', '.join(missing_features))
393 if features:
393 if features:
394 raise ValueError(_('branch cache: unknown features: %s')
394 raise ValueError(_('branch cache: unknown features: %s')
395 % ', '.join(features))
395 % ', '.join(features))
396 last, lrev = lines.pop(0).split(" ", 1)
396 last, lrev = lines.pop(0).split(" ", 1)
397 last, lrev = bin(last), int(lrev)
397 last, lrev = bin(last), int(lrev)
398 if not (lrev < self.changelog.count() and
398 if not (lrev < self.changelog.count() and
399 self.changelog.node(lrev) == last): # sanity check
399 self.changelog.node(lrev) == last): # sanity check
400 # invalidate the cache
400 # invalidate the cache
401 raise ValueError('Invalid branch cache: unknown tip')
401 raise ValueError('Invalid branch cache: unknown tip')
402 for l in lines:
402 for l in lines:
403 if not l: continue
403 if not l: continue
404 node, label = l.split(" ", 1)
404 node, label = l.split(" ", 1)
405 partial[label.strip()] = bin(node)
405 partial[label.strip()] = bin(node)
406 except (KeyboardInterrupt, util.SignalInterrupt):
406 except (KeyboardInterrupt, util.SignalInterrupt):
407 raise
407 raise
408 except Exception, inst:
408 except Exception, inst:
409 if self.ui.debugflag:
409 if self.ui.debugflag:
410 self.ui.warn(str(inst), '\n')
410 self.ui.warn(str(inst), '\n')
411 partial, last, lrev = {}, nullid, nullrev
411 partial, last, lrev = {}, nullid, nullrev
412 return partial, last, lrev
412 return partial, last, lrev
413
413
414 def _writebranchcache(self, branches, tip, tiprev):
414 def _writebranchcache(self, branches, tip, tiprev):
415 try:
415 try:
416 f = self.opener("branches.cache", "w")
416 f = self.opener("branches.cache", "w")
417 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
417 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
418 f.write("%s %s\n" % (hex(tip), tiprev))
418 f.write("%s %s\n" % (hex(tip), tiprev))
419 for label, node in branches.iteritems():
419 for label, node in branches.iteritems():
420 f.write("%s %s\n" % (hex(node), label))
420 f.write("%s %s\n" % (hex(node), label))
421 except IOError:
421 except IOError:
422 pass
422 pass
423
423
424 def _updatebranchcache(self, partial, start, end):
424 def _updatebranchcache(self, partial, start, end):
425 for r in xrange(start, end):
425 for r in xrange(start, end):
426 c = self.changectx(r)
426 c = self.changectx(r)
427 b = c.branch()
427 b = c.branch()
428 partial[b] = c.node()
428 partial[b] = c.node()
429
429
430 def lookup(self, key):
430 def lookup(self, key):
431 if key == '.':
431 if key == '.':
432 key = self.dirstate.parents()[0]
432 key = self.dirstate.parents()[0]
433 if key == nullid:
433 if key == nullid:
434 raise repo.RepoError(_("no revision checked out"))
434 raise repo.RepoError(_("no revision checked out"))
435 elif key == 'null':
435 elif key == 'null':
436 return nullid
436 return nullid
437 n = self.changelog._match(key)
437 n = self.changelog._match(key)
438 if n:
438 if n:
439 return n
439 return n
440 if key in self.tags():
440 if key in self.tags():
441 return self.tags()[key]
441 return self.tags()[key]
442 if key in self.branchtags():
442 if key in self.branchtags():
443 return self.branchtags()[key]
443 return self.branchtags()[key]
444 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
445 if n:
445 if n:
446 return n
446 return n
447 raise repo.RepoError(_("unknown revision '%s'") % key)
447 raise repo.RepoError(_("unknown revision '%s'") % key)
448
448
449 def dev(self):
449 def dev(self):
450 return os.lstat(self.path).st_dev
450 return os.lstat(self.path).st_dev
451
451
452 def local(self):
452 def local(self):
453 return True
453 return True
454
454
455 def join(self, f):
455 def join(self, f):
456 return os.path.join(self.path, f)
456 return os.path.join(self.path, f)
457
457
458 def sjoin(self, f):
458 def sjoin(self, f):
459 f = self.encodefn(f)
459 f = self.encodefn(f)
460 return os.path.join(self.spath, f)
460 return os.path.join(self.spath, f)
461
461
462 def wjoin(self, f):
462 def wjoin(self, f):
463 return os.path.join(self.root, f)
463 return os.path.join(self.root, f)
464
464
465 def file(self, f):
465 def file(self, f):
466 if f[0] == '/':
466 if f[0] == '/':
467 f = f[1:]
467 f = f[1:]
468 return filelog.filelog(self.sopener, f, self.revlogversion)
468 return filelog.filelog(self.sopener, f, self.revlogversion)
469
469
470 def changectx(self, changeid=None):
470 def changectx(self, changeid=None):
471 return context.changectx(self, changeid)
471 return context.changectx(self, changeid)
472
472
473 def workingctx(self):
473 def workingctx(self):
474 return context.workingctx(self)
474 return context.workingctx(self)
475
475
476 def parents(self, changeid=None):
476 def parents(self, changeid=None):
477 '''
477 '''
478 get list of changectxs for parents of changeid or working directory
478 get list of changectxs for parents of changeid or working directory
479 '''
479 '''
480 if changeid is None:
480 if changeid is None:
481 pl = self.dirstate.parents()
481 pl = self.dirstate.parents()
482 else:
482 else:
483 n = self.changelog.lookup(changeid)
483 n = self.changelog.lookup(changeid)
484 pl = self.changelog.parents(n)
484 pl = self.changelog.parents(n)
485 if pl[1] == nullid:
485 if pl[1] == nullid:
486 return [self.changectx(pl[0])]
486 return [self.changectx(pl[0])]
487 return [self.changectx(pl[0]), self.changectx(pl[1])]
487 return [self.changectx(pl[0]), self.changectx(pl[1])]
488
488
489 def filectx(self, path, changeid=None, fileid=None):
489 def filectx(self, path, changeid=None, fileid=None):
490 """changeid can be a changeset revision, node, or tag.
490 """changeid can be a changeset revision, node, or tag.
491 fileid can be a file revision or node."""
491 fileid can be a file revision or node."""
492 return context.filectx(self, path, changeid, fileid)
492 return context.filectx(self, path, changeid, fileid)
493
493
494 def getcwd(self):
494 def getcwd(self):
495 return self.dirstate.getcwd()
495 return self.dirstate.getcwd()
496
496
497 def wfile(self, f, mode='r'):
497 def wfile(self, f, mode='r'):
498 return self.wopener(f, mode)
498 return self.wopener(f, mode)
499
499
500 def wread(self, filename):
500 def wread(self, filename):
501 if self.encodepats == None:
501 if self.encodepats == None:
502 l = []
502 l = []
503 for pat, cmd in self.ui.configitems("encode"):
503 for pat, cmd in self.ui.configitems("encode"):
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 l.append((mf, cmd))
505 l.append((mf, cmd))
506 self.encodepats = l
506 self.encodepats = l
507
507
508 data = self.wopener(filename, 'r').read()
508 data = self.wopener(filename, 'r').read()
509
509
510 for mf, cmd in self.encodepats:
510 for mf, cmd in self.encodepats:
511 if mf(filename):
511 if mf(filename):
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
513 data = util.filter(data, cmd)
513 data = util.filter(data, cmd)
514 break
514 break
515
515
516 return data
516 return data
517
517
    def wwrite(self, filename, data, fd=None):
        """Write *data* to *filename* in the working dir, applying [decode] filters.

        Mirror image of wread: the first matching [decode] pattern's command
        is run over the data first.  If *fd* (an open file object) is given,
        write there instead of opening the file via wopener.
        """
        # lazily build and cache the list of (matcher, command) pairs
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
535
535
    def transaction(self):
        """Return a new transaction, or a nested one if one is already running.

        Snapshots the dirstate to journal.dirstate so rollback() can restore
        it later; aftertrans() renames journal* to undo* on commit so the
        last transaction stays undoable.
        """
        # reuse the running transaction, if any, as a nested transaction
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet (e.g. fresh repo): snapshot empty state
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on successful close, journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
555
555
    def recover(self):
        """Roll back an interrupted transaction (a leftover journal file).

        Returns True if a journal was found and rolled back, False otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # changelog/manifest on disk changed; drop in-memory state
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
566
566
    def rollback(self, wlock=None):
        """Undo the last successful transaction using the undo files.

        Restores the revlogs via transaction.rollback and the dirstate from
        the undo.dirstate snapshot taken by transaction().
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both store and working-dir state changed on disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
579
579
580 def wreload(self):
580 def wreload(self):
581 self.dirstate.read()
581 self.dirstate.read()
582
582
583 def reload(self):
583 def reload(self):
584 self.changelog.load()
584 self.changelog.load()
585 self.manifest.load()
585 self.manifest.load()
586 self.tagscache = None
586 self.tagscache = None
587 self.nodetagscache = None
587 self.nodetagscache = None
588
588
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file *lockname*, optionally waiting for it.

        First tries a non-blocking acquire (timeout 0).  If the lock is held
        and *wait* is true, warns and retries with the ui.timeout config
        value (default 600 seconds); otherwise re-raises lock.LockHeld.
        *releasefn*/*acquirefn* are callbacks run on release/after acquire.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
604
604
    def lock(self, wait=1):
        """Acquire the store lock; reload() runs after acquisition so the
        in-memory changelog/manifest match what is on disk."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
608
608
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is re-read on
        acquire (wreload) and written back when the lock is released."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
613
613
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn        - file name, read from the working directory
        manifest1 - manifest of the first parent
        manifest2 - manifest of the second parent ({} if not a merge)
        linkrev   - changelog revision this filelog entry will link to
        changelist- list that fn is appended to if a new entry is written

        Returns the filelog node for fn: either the existing parent node
        (when the file is unchanged) or a newly added revision.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # with copy metadata present, the real parent comes from the
            # copy source, so the first filelog parent is cleared
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
673
673
674 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
674 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
675 if p1 is None:
675 if p1 is None:
676 p1, p2 = self.dirstate.parents()
676 p1, p2 = self.dirstate.parents()
677 return self.commit(files=files, text=text, user=user, date=date,
677 return self.commit(files=files, text=text, user=user, date=date,
678 p1=p1, p2=p2, wlock=wlock)
678 p1=p1, p2=p2, wlock=wlock)
679
679
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node, or None if nothing
        was committed (nothing changed, or the user supplied an empty
        commit message).

        files   - explicit list of files to commit; otherwise the dirstate
                  status (filtered by *match*) decides
        p1, p2  - explicit parents; giving p1 selects rawcommit mode, which
                  bypasses the dirstate checks
        extra   - extra changeset metadata (copied, then possibly extended
                  with the branch name)
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # copy so the caller's dict is not mutated when we add "branch"
        extra = extra.copy()

        if use_dirstate:
            if files:
                # classify the explicit file list by dirstate status
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it was already at the new parent
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate that the name is proper UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a branch-name change alone is enough to allow a commit
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a file that cannot be read is treated
                    # as removed instead of aborting
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
819
819
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''
        # NOTE: the files=[] default is a shared mutable; it is only read
        # here, never mutated, so this is safe as written.

        if node:
            fdict = dict.fromkeys(files)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            # walk the manifest in sorted order so results (and revlog
            # reads) come out in a deterministic, on-disk-friendly order
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
853
858
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        """

        def fcmp(fn, mf):
            # compare working-dir contents of fn against its manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of *node*, restricted to files accepted by *match*
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best effort: without the lock we simply skip the
                # dirstate fixups below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # record that the file is clean so future
                            # status calls can skip the content compare
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # an empty mf2 entry means "working dir version";
                    # fall back to a content compare in that case
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
950
955
    def add(self, list, wlock=None):
        """Schedule the given files for addition at the next commit.

        Files that do not exist, are not regular files, or are already
        tracked only produce a warning.
        """
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
965
970
966 def forget(self, list, wlock=None):
971 def forget(self, list, wlock=None):
967 if not wlock:
972 if not wlock:
968 wlock = self.wlock()
973 wlock = self.wlock()
969 for f in list:
974 for f in list:
970 if self.dirstate.state(f) not in 'ai':
975 if self.dirstate.state(f) not in 'ai':
971 self.ui.warn(_("%s not added!\n") % f)
976 self.ui.warn(_("%s not added!\n") % f)
972 else:
977 else:
973 self.dirstate.forget([f])
978 self.dirstate.forget([f])
974
979
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the given files for removal at the next commit.

        With unlink=True the files are also deleted from the working
        directory first (missing files are ignored).  Files that still
        exist or are untracked only produce a warning; files in state 'a'
        are simply forgotten instead of being marked removed.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
995
1000
996 def undelete(self, list, wlock=None):
1001 def undelete(self, list, wlock=None):
997 p = self.dirstate.parents()[0]
1002 p = self.dirstate.parents()[0]
998 mn = self.changelog.read(p)[0]
1003 mn = self.changelog.read(p)[0]
999 m = self.manifest.read(mn)
1004 m = self.manifest.read(mn)
1000 if not wlock:
1005 if not wlock:
1001 wlock = self.wlock()
1006 wlock = self.wlock()
1002 for f in list:
1007 for f in list:
1003 if self.dirstate.state(f) not in "r":
1008 if self.dirstate.state(f) not in "r":
1004 self.ui.warn("%s not removed!\n" % f)
1009 self.ui.warn("%s not removed!\n" % f)
1005 else:
1010 else:
1006 t = self.file(f).read(m[f])
1011 t = self.file(f).read(m[f])
1007 self.wwrite(f, t)
1012 self.wwrite(f, t)
1008 util.set_exec(self.wjoin(f), m.execf(f))
1013 util.set_exec(self.wjoin(f), m.execf(f))
1009 self.dirstate.update([f], "n")
1014 self.dirstate.update([f], "n")
1010
1015
    def copy(self, source, dest, wlock=None):
        """Record that working-dir file *dest* was copied from *source*.

        *dest* must already exist as a regular file; if it is untracked
        ('?') it is scheduled for addition first, then the copy relation
        is stored in the dirstate.
        """
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
1023
1028
1024 def heads(self, start=None):
1029 def heads(self, start=None):
1025 heads = self.changelog.heads(start)
1030 heads = self.changelog.heads(start)
1026 # sort the output in rev descending order
1031 # sort the output in rev descending order
1027 heads = [(-self.changelog.rev(h), h) for h in heads]
1032 heads = [(-self.changelog.rev(h), h) for h in heads]
1028 heads.sort()
1033 heads.sort()
1029 return [n for (r, n) in heads]
1034 return [n for (r, n) in heads]
1030
1035
1031 # branchlookup returns a dict giving a list of branches for
1036 # branchlookup returns a dict giving a list of branches for
1032 # each head. A branch is defined as the tag of a node or
1037 # each head. A branch is defined as the tag of a node or
1033 # the branch of the node's parents. If a node has multiple
1038 # the branch of the node's parents. If a node has multiple
1034 # branch tags, tags are eliminated if they are visible from other
1039 # branch tags, tags are eliminated if they are visible from other
1035 # branch tags.
1040 # branch tags.
1036 #
1041 #
1037 # So, for this graph: a->b->c->d->e
1042 # So, for this graph: a->b->c->d->e
1038 # \ /
1043 # \ /
1039 # aa -----/
1044 # aa -----/
1040 # a has tag 2.6.12
1045 # a has tag 2.6.12
1041 # d has tag 2.6.13
1046 # d has tag 2.6.13
1042 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1047 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1043 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1048 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1044 # from the list.
1049 # from the list.
1045 #
1050 #
1046 # It is possible that more than one head will have the same branch tag.
1051 # It is possible that more than one head will have the same branch tag.
1047 # callers need to check the result for multiple heads under the same
1052 # callers need to check the result for multiple heads under the same
1048 # branch tag if that is a problem for them (ie checkout of a specific
1053 # branch tag if that is a problem for them (ie checkout of a specific
1049 # branch).
1054 # branch).
1050 #
1055 #
1051 # passing in a specific branch will limit the depth of the search
1056 # passing in a specific branch will limit the depth of the search
1052 # through the parents. It won't limit the branches returned in the
1057 # through the parents. It won't limit the branches returned in the
1053 # result though.
1058 # result though.
1054 def branchlookup(self, heads=None, branch=None):
1059 def branchlookup(self, heads=None, branch=None):
1055 if not heads:
1060 if not heads:
1056 heads = self.heads()
1061 heads = self.heads()
1057 headt = [ h for h in heads ]
1062 headt = [ h for h in heads ]
1058 chlog = self.changelog
1063 chlog = self.changelog
1059 branches = {}
1064 branches = {}
1060 merges = []
1065 merges = []
1061 seenmerge = {}
1066 seenmerge = {}
1062
1067
1063 # traverse the tree once for each head, recording in the branches
1068 # traverse the tree once for each head, recording in the branches
1064 # dict which tags are visible from this head. The branches
1069 # dict which tags are visible from this head. The branches
1065 # dict also records which tags are visible from each tag
1070 # dict also records which tags are visible from each tag
1066 # while we traverse.
1071 # while we traverse.
1067 while headt or merges:
1072 while headt or merges:
1068 if merges:
1073 if merges:
1069 n, found = merges.pop()
1074 n, found = merges.pop()
1070 visit = [n]
1075 visit = [n]
1071 else:
1076 else:
1072 h = headt.pop()
1077 h = headt.pop()
1073 visit = [h]
1078 visit = [h]
1074 found = [h]
1079 found = [h]
1075 seen = {}
1080 seen = {}
1076 while visit:
1081 while visit:
1077 n = visit.pop()
1082 n = visit.pop()
1078 if n in seen:
1083 if n in seen:
1079 continue
1084 continue
1080 pp = chlog.parents(n)
1085 pp = chlog.parents(n)
1081 tags = self.nodetags(n)
1086 tags = self.nodetags(n)
1082 if tags:
1087 if tags:
1083 for x in tags:
1088 for x in tags:
1084 if x == 'tip':
1089 if x == 'tip':
1085 continue
1090 continue
1086 for f in found:
1091 for f in found:
1087 branches.setdefault(f, {})[n] = 1
1092 branches.setdefault(f, {})[n] = 1
1088 branches.setdefault(n, {})[n] = 1
1093 branches.setdefault(n, {})[n] = 1
1089 break
1094 break
1090 if n not in found:
1095 if n not in found:
1091 found.append(n)
1096 found.append(n)
1092 if branch in tags:
1097 if branch in tags:
1093 continue
1098 continue
1094 seen[n] = 1
1099 seen[n] = 1
1095 if pp[1] != nullid and n not in seenmerge:
1100 if pp[1] != nullid and n not in seenmerge:
1096 merges.append((pp[1], [x for x in found]))
1101 merges.append((pp[1], [x for x in found]))
1097 seenmerge[n] = 1
1102 seenmerge[n] = 1
1098 if pp[0] != nullid:
1103 if pp[0] != nullid:
1099 visit.append(pp[0])
1104 visit.append(pp[0])
1100 # traverse the branches dict, eliminating branch tags from each
1105 # traverse the branches dict, eliminating branch tags from each
1101 # head that are visible from another branch tag for that head.
1106 # head that are visible from another branch tag for that head.
1102 out = {}
1107 out = {}
1103 viscache = {}
1108 viscache = {}
1104 for h in heads:
1109 for h in heads:
1105 def visible(node):
1110 def visible(node):
1106 if node in viscache:
1111 if node in viscache:
1107 return viscache[node]
1112 return viscache[node]
1108 ret = {}
1113 ret = {}
1109 visit = [node]
1114 visit = [node]
1110 while visit:
1115 while visit:
1111 x = visit.pop()
1116 x = visit.pop()
1112 if x in viscache:
1117 if x in viscache:
1113 ret.update(viscache[x])
1118 ret.update(viscache[x])
1114 elif x not in ret:
1119 elif x not in ret:
1115 ret[x] = 1
1120 ret[x] = 1
1116 if x in branches:
1121 if x in branches:
1117 visit[len(visit):] = branches[x].keys()
1122 visit[len(visit):] = branches[x].keys()
1118 viscache[node] = ret
1123 viscache[node] = ret
1119 return ret
1124 return ret
1120 if h not in branches:
1125 if h not in branches:
1121 continue
1126 continue
1122 # O(n^2), but somewhat limited. This only searches the
1127 # O(n^2), but somewhat limited. This only searches the
1123 # tags visible from a specific head, not all the tags in the
1128 # tags visible from a specific head, not all the tags in the
1124 # whole repo.
1129 # whole repo.
1125 for b in branches[h]:
1130 for b in branches[h]:
1126 vis = False
1131 vis = False
1127 for bb in branches[h].keys():
1132 for bb in branches[h].keys():
1128 if b != bb:
1133 if b != bb:
1129 if b in visible(bb):
1134 if b in visible(bb):
1130 vis = True
1135 vis = True
1131 break
1136 break
1132 if not vis:
1137 if not vis:
1133 l = out.setdefault(h, [])
1138 l = out.setdefault(h, [])
1134 l[len(l):] = self.nodetags(b)
1139 l[len(l):] = self.nodetags(b)
1135 return out
1140 return out
1136
1141
1137 def branches(self, nodes):
1142 def branches(self, nodes):
1138 if not nodes:
1143 if not nodes:
1139 nodes = [self.changelog.tip()]
1144 nodes = [self.changelog.tip()]
1140 b = []
1145 b = []
1141 for n in nodes:
1146 for n in nodes:
1142 t = n
1147 t = n
1143 while 1:
1148 while 1:
1144 p = self.changelog.parents(n)
1149 p = self.changelog.parents(n)
1145 if p[1] != nullid or p[0] == nullid:
1150 if p[1] != nullid or p[0] == nullid:
1146 b.append((t, n, p[0], p[1]))
1151 b.append((t, n, p[0], p[1]))
1147 break
1152 break
1148 n = p[0]
1153 n = p[0]
1149 return b
1154 return b
1150
1155
1151 def between(self, pairs):
1156 def between(self, pairs):
1152 r = []
1157 r = []
1153
1158
1154 for top, bottom in pairs:
1159 for top, bottom in pairs:
1155 n, l, i = top, [], 0
1160 n, l, i = top, [], 0
1156 f = 1
1161 f = 1
1157
1162
1158 while n != bottom:
1163 while n != bottom:
1159 p = self.changelog.parents(n)[0]
1164 p = self.changelog.parents(n)[0]
1160 if i == f:
1165 if i == f:
1161 l.append(n)
1166 l.append(n)
1162 f = f * 2
1167 f = f * 2
1163 n = p
1168 n = p
1164 i += 1
1169 i += 1
1165
1170
1166 r.append(l)
1171 r.append(l)
1167
1172
1168 return r
1173 return r
1169
1174
1170 def findincoming(self, remote, base=None, heads=None, force=False):
1175 def findincoming(self, remote, base=None, heads=None, force=False):
1171 """Return list of roots of the subsets of missing nodes from remote
1176 """Return list of roots of the subsets of missing nodes from remote
1172
1177
1173 If base dict is specified, assume that these nodes and their parents
1178 If base dict is specified, assume that these nodes and their parents
1174 exist on the remote side and that no child of a node of base exists
1179 exist on the remote side and that no child of a node of base exists
1175 in both remote and self.
1180 in both remote and self.
1176 Furthermore base will be updated to include the nodes that exists
1181 Furthermore base will be updated to include the nodes that exists
1177 in self and remote but no children exists in self and remote.
1182 in self and remote but no children exists in self and remote.
1178 If a list of heads is specified, return only nodes which are heads
1183 If a list of heads is specified, return only nodes which are heads
1179 or ancestors of these heads.
1184 or ancestors of these heads.
1180
1185
1181 All the ancestors of base are in self and in remote.
1186 All the ancestors of base are in self and in remote.
1182 All the descendants of the list returned are missing in self.
1187 All the descendants of the list returned are missing in self.
1183 (and so we know that the rest of the nodes are missing in remote, see
1188 (and so we know that the rest of the nodes are missing in remote, see
1184 outgoing)
1189 outgoing)
1185 """
1190 """
1186 m = self.changelog.nodemap
1191 m = self.changelog.nodemap
1187 search = []
1192 search = []
1188 fetch = {}
1193 fetch = {}
1189 seen = {}
1194 seen = {}
1190 seenbranch = {}
1195 seenbranch = {}
1191 if base == None:
1196 if base == None:
1192 base = {}
1197 base = {}
1193
1198
1194 if not heads:
1199 if not heads:
1195 heads = remote.heads()
1200 heads = remote.heads()
1196
1201
1197 if self.changelog.tip() == nullid:
1202 if self.changelog.tip() == nullid:
1198 base[nullid] = 1
1203 base[nullid] = 1
1199 if heads != [nullid]:
1204 if heads != [nullid]:
1200 return [nullid]
1205 return [nullid]
1201 return []
1206 return []
1202
1207
1203 # assume we're closer to the tip than the root
1208 # assume we're closer to the tip than the root
1204 # and start by examining the heads
1209 # and start by examining the heads
1205 self.ui.status(_("searching for changes\n"))
1210 self.ui.status(_("searching for changes\n"))
1206
1211
1207 unknown = []
1212 unknown = []
1208 for h in heads:
1213 for h in heads:
1209 if h not in m:
1214 if h not in m:
1210 unknown.append(h)
1215 unknown.append(h)
1211 else:
1216 else:
1212 base[h] = 1
1217 base[h] = 1
1213
1218
1214 if not unknown:
1219 if not unknown:
1215 return []
1220 return []
1216
1221
1217 req = dict.fromkeys(unknown)
1222 req = dict.fromkeys(unknown)
1218 reqcnt = 0
1223 reqcnt = 0
1219
1224
1220 # search through remote branches
1225 # search through remote branches
1221 # a 'branch' here is a linear segment of history, with four parts:
1226 # a 'branch' here is a linear segment of history, with four parts:
1222 # head, root, first parent, second parent
1227 # head, root, first parent, second parent
1223 # (a branch always has two parents (or none) by definition)
1228 # (a branch always has two parents (or none) by definition)
1224 unknown = remote.branches(unknown)
1229 unknown = remote.branches(unknown)
1225 while unknown:
1230 while unknown:
1226 r = []
1231 r = []
1227 while unknown:
1232 while unknown:
1228 n = unknown.pop(0)
1233 n = unknown.pop(0)
1229 if n[0] in seen:
1234 if n[0] in seen:
1230 continue
1235 continue
1231
1236
1232 self.ui.debug(_("examining %s:%s\n")
1237 self.ui.debug(_("examining %s:%s\n")
1233 % (short(n[0]), short(n[1])))
1238 % (short(n[0]), short(n[1])))
1234 if n[0] == nullid: # found the end of the branch
1239 if n[0] == nullid: # found the end of the branch
1235 pass
1240 pass
1236 elif n in seenbranch:
1241 elif n in seenbranch:
1237 self.ui.debug(_("branch already found\n"))
1242 self.ui.debug(_("branch already found\n"))
1238 continue
1243 continue
1239 elif n[1] and n[1] in m: # do we know the base?
1244 elif n[1] and n[1] in m: # do we know the base?
1240 self.ui.debug(_("found incomplete branch %s:%s\n")
1245 self.ui.debug(_("found incomplete branch %s:%s\n")
1241 % (short(n[0]), short(n[1])))
1246 % (short(n[0]), short(n[1])))
1242 search.append(n) # schedule branch range for scanning
1247 search.append(n) # schedule branch range for scanning
1243 seenbranch[n] = 1
1248 seenbranch[n] = 1
1244 else:
1249 else:
1245 if n[1] not in seen and n[1] not in fetch:
1250 if n[1] not in seen and n[1] not in fetch:
1246 if n[2] in m and n[3] in m:
1251 if n[2] in m and n[3] in m:
1247 self.ui.debug(_("found new changeset %s\n") %
1252 self.ui.debug(_("found new changeset %s\n") %
1248 short(n[1]))
1253 short(n[1]))
1249 fetch[n[1]] = 1 # earliest unknown
1254 fetch[n[1]] = 1 # earliest unknown
1250 for p in n[2:4]:
1255 for p in n[2:4]:
1251 if p in m:
1256 if p in m:
1252 base[p] = 1 # latest known
1257 base[p] = 1 # latest known
1253
1258
1254 for p in n[2:4]:
1259 for p in n[2:4]:
1255 if p not in req and p not in m:
1260 if p not in req and p not in m:
1256 r.append(p)
1261 r.append(p)
1257 req[p] = 1
1262 req[p] = 1
1258 seen[n[0]] = 1
1263 seen[n[0]] = 1
1259
1264
1260 if r:
1265 if r:
1261 reqcnt += 1
1266 reqcnt += 1
1262 self.ui.debug(_("request %d: %s\n") %
1267 self.ui.debug(_("request %d: %s\n") %
1263 (reqcnt, " ".join(map(short, r))))
1268 (reqcnt, " ".join(map(short, r))))
1264 for p in xrange(0, len(r), 10):
1269 for p in xrange(0, len(r), 10):
1265 for b in remote.branches(r[p:p+10]):
1270 for b in remote.branches(r[p:p+10]):
1266 self.ui.debug(_("received %s:%s\n") %
1271 self.ui.debug(_("received %s:%s\n") %
1267 (short(b[0]), short(b[1])))
1272 (short(b[0]), short(b[1])))
1268 unknown.append(b)
1273 unknown.append(b)
1269
1274
1270 # do binary search on the branches we found
1275 # do binary search on the branches we found
1271 while search:
1276 while search:
1272 n = search.pop(0)
1277 n = search.pop(0)
1273 reqcnt += 1
1278 reqcnt += 1
1274 l = remote.between([(n[0], n[1])])[0]
1279 l = remote.between([(n[0], n[1])])[0]
1275 l.append(n[1])
1280 l.append(n[1])
1276 p = n[0]
1281 p = n[0]
1277 f = 1
1282 f = 1
1278 for i in l:
1283 for i in l:
1279 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1284 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1280 if i in m:
1285 if i in m:
1281 if f <= 2:
1286 if f <= 2:
1282 self.ui.debug(_("found new branch changeset %s\n") %
1287 self.ui.debug(_("found new branch changeset %s\n") %
1283 short(p))
1288 short(p))
1284 fetch[p] = 1
1289 fetch[p] = 1
1285 base[i] = 1
1290 base[i] = 1
1286 else:
1291 else:
1287 self.ui.debug(_("narrowed branch search to %s:%s\n")
1292 self.ui.debug(_("narrowed branch search to %s:%s\n")
1288 % (short(p), short(i)))
1293 % (short(p), short(i)))
1289 search.append((p, i))
1294 search.append((p, i))
1290 break
1295 break
1291 p, f = i, f * 2
1296 p, f = i, f * 2
1292
1297
1293 # sanity check our fetch list
1298 # sanity check our fetch list
1294 for f in fetch.keys():
1299 for f in fetch.keys():
1295 if f in m:
1300 if f in m:
1296 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1301 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1297
1302
1298 if base.keys() == [nullid]:
1303 if base.keys() == [nullid]:
1299 if force:
1304 if force:
1300 self.ui.warn(_("warning: repository is unrelated\n"))
1305 self.ui.warn(_("warning: repository is unrelated\n"))
1301 else:
1306 else:
1302 raise util.Abort(_("repository is unrelated"))
1307 raise util.Abort(_("repository is unrelated"))
1303
1308
1304 self.ui.debug(_("found new changesets starting at ") +
1309 self.ui.debug(_("found new changesets starting at ") +
1305 " ".join([short(f) for f in fetch]) + "\n")
1310 " ".join([short(f) for f in fetch]) + "\n")
1306
1311
1307 self.ui.debug(_("%d total queries\n") % reqcnt)
1312 self.ui.debug(_("%d total queries\n") % reqcnt)
1308
1313
1309 return fetch.keys()
1314 return fetch.keys()
1310
1315
1311 def findoutgoing(self, remote, base=None, heads=None, force=False):
1316 def findoutgoing(self, remote, base=None, heads=None, force=False):
1312 """Return list of nodes that are roots of subsets not in remote
1317 """Return list of nodes that are roots of subsets not in remote
1313
1318
1314 If base dict is specified, assume that these nodes and their parents
1319 If base dict is specified, assume that these nodes and their parents
1315 exist on the remote side.
1320 exist on the remote side.
1316 If a list of heads is specified, return only nodes which are heads
1321 If a list of heads is specified, return only nodes which are heads
1317 or ancestors of these heads, and return a second element which
1322 or ancestors of these heads, and return a second element which
1318 contains all remote heads which get new children.
1323 contains all remote heads which get new children.
1319 """
1324 """
1320 if base == None:
1325 if base == None:
1321 base = {}
1326 base = {}
1322 self.findincoming(remote, base, heads, force=force)
1327 self.findincoming(remote, base, heads, force=force)
1323
1328
1324 self.ui.debug(_("common changesets up to ")
1329 self.ui.debug(_("common changesets up to ")
1325 + " ".join(map(short, base.keys())) + "\n")
1330 + " ".join(map(short, base.keys())) + "\n")
1326
1331
1327 remain = dict.fromkeys(self.changelog.nodemap)
1332 remain = dict.fromkeys(self.changelog.nodemap)
1328
1333
1329 # prune everything remote has from the tree
1334 # prune everything remote has from the tree
1330 del remain[nullid]
1335 del remain[nullid]
1331 remove = base.keys()
1336 remove = base.keys()
1332 while remove:
1337 while remove:
1333 n = remove.pop(0)
1338 n = remove.pop(0)
1334 if n in remain:
1339 if n in remain:
1335 del remain[n]
1340 del remain[n]
1336 for p in self.changelog.parents(n):
1341 for p in self.changelog.parents(n):
1337 remove.append(p)
1342 remove.append(p)
1338
1343
1339 # find every node whose parents have been pruned
1344 # find every node whose parents have been pruned
1340 subset = []
1345 subset = []
1341 # find every remote head that will get new children
1346 # find every remote head that will get new children
1342 updated_heads = {}
1347 updated_heads = {}
1343 for n in remain:
1348 for n in remain:
1344 p1, p2 = self.changelog.parents(n)
1349 p1, p2 = self.changelog.parents(n)
1345 if p1 not in remain and p2 not in remain:
1350 if p1 not in remain and p2 not in remain:
1346 subset.append(n)
1351 subset.append(n)
1347 if heads:
1352 if heads:
1348 if p1 in heads:
1353 if p1 in heads:
1349 updated_heads[p1] = True
1354 updated_heads[p1] = True
1350 if p2 in heads:
1355 if p2 in heads:
1351 updated_heads[p2] = True
1356 updated_heads[p2] = True
1352
1357
1353 # this is the set of all roots we have to push
1358 # this is the set of all roots we have to push
1354 if heads:
1359 if heads:
1355 return subset, updated_heads.keys()
1360 return subset, updated_heads.keys()
1356 else:
1361 else:
1357 return subset
1362 return subset
1358
1363
1359 def pull(self, remote, heads=None, force=False, lock=None):
1364 def pull(self, remote, heads=None, force=False, lock=None):
1360 mylock = False
1365 mylock = False
1361 if not lock:
1366 if not lock:
1362 lock = self.lock()
1367 lock = self.lock()
1363 mylock = True
1368 mylock = True
1364
1369
1365 try:
1370 try:
1366 fetch = self.findincoming(remote, force=force)
1371 fetch = self.findincoming(remote, force=force)
1367 if fetch == [nullid]:
1372 if fetch == [nullid]:
1368 self.ui.status(_("requesting all changes\n"))
1373 self.ui.status(_("requesting all changes\n"))
1369
1374
1370 if not fetch:
1375 if not fetch:
1371 self.ui.status(_("no changes found\n"))
1376 self.ui.status(_("no changes found\n"))
1372 return 0
1377 return 0
1373
1378
1374 if heads is None:
1379 if heads is None:
1375 cg = remote.changegroup(fetch, 'pull')
1380 cg = remote.changegroup(fetch, 'pull')
1376 else:
1381 else:
1377 if 'changegroupsubset' not in remote.capabilities:
1382 if 'changegroupsubset' not in remote.capabilities:
1378 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1383 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1379 cg = remote.changegroupsubset(fetch, heads, 'pull')
1384 cg = remote.changegroupsubset(fetch, heads, 'pull')
1380 return self.addchangegroup(cg, 'pull', remote.url())
1385 return self.addchangegroup(cg, 'pull', remote.url())
1381 finally:
1386 finally:
1382 if mylock:
1387 if mylock:
1383 lock.release()
1388 lock.release()
1384
1389
1385 def push(self, remote, force=False, revs=None):
1390 def push(self, remote, force=False, revs=None):
1386 # there are two ways to push to remote repo:
1391 # there are two ways to push to remote repo:
1387 #
1392 #
1388 # addchangegroup assumes local user can lock remote
1393 # addchangegroup assumes local user can lock remote
1389 # repo (local filesystem, old ssh servers).
1394 # repo (local filesystem, old ssh servers).
1390 #
1395 #
1391 # unbundle assumes local user cannot lock remote repo (new ssh
1396 # unbundle assumes local user cannot lock remote repo (new ssh
1392 # servers, http servers).
1397 # servers, http servers).
1393
1398
1394 if remote.capable('unbundle'):
1399 if remote.capable('unbundle'):
1395 return self.push_unbundle(remote, force, revs)
1400 return self.push_unbundle(remote, force, revs)
1396 return self.push_addchangegroup(remote, force, revs)
1401 return self.push_addchangegroup(remote, force, revs)
1397
1402
1398 def prepush(self, remote, force, revs):
1403 def prepush(self, remote, force, revs):
1399 base = {}
1404 base = {}
1400 remote_heads = remote.heads()
1405 remote_heads = remote.heads()
1401 inc = self.findincoming(remote, base, remote_heads, force=force)
1406 inc = self.findincoming(remote, base, remote_heads, force=force)
1402
1407
1403 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1408 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1404 if revs is not None:
1409 if revs is not None:
1405 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1410 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1406 else:
1411 else:
1407 bases, heads = update, self.changelog.heads()
1412 bases, heads = update, self.changelog.heads()
1408
1413
1409 if not bases:
1414 if not bases:
1410 self.ui.status(_("no changes found\n"))
1415 self.ui.status(_("no changes found\n"))
1411 return None, 1
1416 return None, 1
1412 elif not force:
1417 elif not force:
1413 # check if we're creating new remote heads
1418 # check if we're creating new remote heads
1414 # to be a remote head after push, node must be either
1419 # to be a remote head after push, node must be either
1415 # - unknown locally
1420 # - unknown locally
1416 # - a local outgoing head descended from update
1421 # - a local outgoing head descended from update
1417 # - a remote head that's known locally and not
1422 # - a remote head that's known locally and not
1418 # ancestral to an outgoing head
1423 # ancestral to an outgoing head
1419
1424
1420 warn = 0
1425 warn = 0
1421
1426
1422 if remote_heads == [nullid]:
1427 if remote_heads == [nullid]:
1423 warn = 0
1428 warn = 0
1424 elif not revs and len(heads) > len(remote_heads):
1429 elif not revs and len(heads) > len(remote_heads):
1425 warn = 1
1430 warn = 1
1426 else:
1431 else:
1427 newheads = list(heads)
1432 newheads = list(heads)
1428 for r in remote_heads:
1433 for r in remote_heads:
1429 if r in self.changelog.nodemap:
1434 if r in self.changelog.nodemap:
1430 desc = self.changelog.heads(r, heads)
1435 desc = self.changelog.heads(r, heads)
1431 l = [h for h in heads if h in desc]
1436 l = [h for h in heads if h in desc]
1432 if not l:
1437 if not l:
1433 newheads.append(r)
1438 newheads.append(r)
1434 else:
1439 else:
1435 newheads.append(r)
1440 newheads.append(r)
1436 if len(newheads) > len(remote_heads):
1441 if len(newheads) > len(remote_heads):
1437 warn = 1
1442 warn = 1
1438
1443
1439 if warn:
1444 if warn:
1440 self.ui.warn(_("abort: push creates new remote branches!\n"))
1445 self.ui.warn(_("abort: push creates new remote branches!\n"))
1441 self.ui.status(_("(did you forget to merge?"
1446 self.ui.status(_("(did you forget to merge?"
1442 " use push -f to force)\n"))
1447 " use push -f to force)\n"))
1443 return None, 1
1448 return None, 1
1444 elif inc:
1449 elif inc:
1445 self.ui.warn(_("note: unsynced remote changes!\n"))
1450 self.ui.warn(_("note: unsynced remote changes!\n"))
1446
1451
1447
1452
1448 if revs is None:
1453 if revs is None:
1449 cg = self.changegroup(update, 'push')
1454 cg = self.changegroup(update, 'push')
1450 else:
1455 else:
1451 cg = self.changegroupsubset(update, revs, 'push')
1456 cg = self.changegroupsubset(update, revs, 'push')
1452 return cg, remote_heads
1457 return cg, remote_heads
1453
1458
1454 def push_addchangegroup(self, remote, force, revs):
1459 def push_addchangegroup(self, remote, force, revs):
1455 lock = remote.lock()
1460 lock = remote.lock()
1456
1461
1457 ret = self.prepush(remote, force, revs)
1462 ret = self.prepush(remote, force, revs)
1458 if ret[0] is not None:
1463 if ret[0] is not None:
1459 cg, remote_heads = ret
1464 cg, remote_heads = ret
1460 return remote.addchangegroup(cg, 'push', self.url())
1465 return remote.addchangegroup(cg, 'push', self.url())
1461 return ret[1]
1466 return ret[1]
1462
1467
1463 def push_unbundle(self, remote, force, revs):
1468 def push_unbundle(self, remote, force, revs):
1464 # local repo finds heads on server, finds out what revs it
1469 # local repo finds heads on server, finds out what revs it
1465 # must push. once revs transferred, if server finds it has
1470 # must push. once revs transferred, if server finds it has
1466 # different heads (someone else won commit/push race), server
1471 # different heads (someone else won commit/push race), server
1467 # aborts.
1472 # aborts.
1468
1473
1469 ret = self.prepush(remote, force, revs)
1474 ret = self.prepush(remote, force, revs)
1470 if ret[0] is not None:
1475 if ret[0] is not None:
1471 cg, remote_heads = ret
1476 cg, remote_heads = ret
1472 if force: remote_heads = ['force']
1477 if force: remote_heads = ['force']
1473 return remote.unbundle(cg, remote_heads, 'push')
1478 return remote.unbundle(cg, remote_heads, 'push')
1474 return ret[1]
1479 return ret[1]
1475
1480
1476 def changegroupinfo(self, nodes):
1481 def changegroupinfo(self, nodes):
1477 self.ui.note(_("%d changesets found\n") % len(nodes))
1482 self.ui.note(_("%d changesets found\n") % len(nodes))
1478 if self.ui.debugflag:
1483 if self.ui.debugflag:
1479 self.ui.debug(_("List of changesets:\n"))
1484 self.ui.debug(_("List of changesets:\n"))
1480 for node in nodes:
1485 for node in nodes:
1481 self.ui.debug("%s\n" % hex(node))
1486 self.ui.debug("%s\n" % hex(node))
1482
1487
1483 def changegroupsubset(self, bases, heads, source):
1488 def changegroupsubset(self, bases, heads, source):
1484 """This function generates a changegroup consisting of all the nodes
1489 """This function generates a changegroup consisting of all the nodes
1485 that are descendents of any of the bases, and ancestors of any of
1490 that are descendents of any of the bases, and ancestors of any of
1486 the heads.
1491 the heads.
1487
1492
1488 It is fairly complex as determining which filenodes and which
1493 It is fairly complex as determining which filenodes and which
1489 manifest nodes need to be included for the changeset to be complete
1494 manifest nodes need to be included for the changeset to be complete
1490 is non-trivial.
1495 is non-trivial.
1491
1496
1492 Another wrinkle is doing the reverse, figuring out which changeset in
1497 Another wrinkle is doing the reverse, figuring out which changeset in
1493 the changegroup a particular filenode or manifestnode belongs to."""
1498 the changegroup a particular filenode or manifestnode belongs to."""
1494
1499
1495 self.hook('preoutgoing', throw=True, source=source)
1500 self.hook('preoutgoing', throw=True, source=source)
1496
1501
1497 # Set up some initial variables
1502 # Set up some initial variables
1498 # Make it easy to refer to self.changelog
1503 # Make it easy to refer to self.changelog
1499 cl = self.changelog
1504 cl = self.changelog
1500 # msng is short for missing - compute the list of changesets in this
1505 # msng is short for missing - compute the list of changesets in this
1501 # changegroup.
1506 # changegroup.
1502 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1507 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1503 self.changegroupinfo(msng_cl_lst)
1508 self.changegroupinfo(msng_cl_lst)
1504 # Some bases may turn out to be superfluous, and some heads may be
1509 # Some bases may turn out to be superfluous, and some heads may be
1505 # too. nodesbetween will return the minimal set of bases and heads
1510 # too. nodesbetween will return the minimal set of bases and heads
1506 # necessary to re-create the changegroup.
1511 # necessary to re-create the changegroup.
1507
1512
1508 # Known heads are the list of heads that it is assumed the recipient
1513 # Known heads are the list of heads that it is assumed the recipient
1509 # of this changegroup will know about.
1514 # of this changegroup will know about.
1510 knownheads = {}
1515 knownheads = {}
1511 # We assume that all parents of bases are known heads.
1516 # We assume that all parents of bases are known heads.
1512 for n in bases:
1517 for n in bases:
1513 for p in cl.parents(n):
1518 for p in cl.parents(n):
1514 if p != nullid:
1519 if p != nullid:
1515 knownheads[p] = 1
1520 knownheads[p] = 1
1516 knownheads = knownheads.keys()
1521 knownheads = knownheads.keys()
1517 if knownheads:
1522 if knownheads:
1518 # Now that we know what heads are known, we can compute which
1523 # Now that we know what heads are known, we can compute which
1519 # changesets are known. The recipient must know about all
1524 # changesets are known. The recipient must know about all
1520 # changesets required to reach the known heads from the null
1525 # changesets required to reach the known heads from the null
1521 # changeset.
1526 # changeset.
1522 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1527 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1523 junk = None
1528 junk = None
1524 # Transform the list into an ersatz set.
1529 # Transform the list into an ersatz set.
1525 has_cl_set = dict.fromkeys(has_cl_set)
1530 has_cl_set = dict.fromkeys(has_cl_set)
1526 else:
1531 else:
1527 # If there were no known heads, the recipient cannot be assumed to
1532 # If there were no known heads, the recipient cannot be assumed to
1528 # know about any changesets.
1533 # know about any changesets.
1529 has_cl_set = {}
1534 has_cl_set = {}
1530
1535
1531 # Make it easy to refer to self.manifest
1536 # Make it easy to refer to self.manifest
1532 mnfst = self.manifest
1537 mnfst = self.manifest
1533 # We don't know which manifests are missing yet
1538 # We don't know which manifests are missing yet
1534 msng_mnfst_set = {}
1539 msng_mnfst_set = {}
1535 # Nor do we know which filenodes are missing.
1540 # Nor do we know which filenodes are missing.
1536 msng_filenode_set = {}
1541 msng_filenode_set = {}
1537
1542
1538 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1543 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1539 junk = None
1544 junk = None
1540
1545
1541 # A changeset always belongs to itself, so the changenode lookup
1546 # A changeset always belongs to itself, so the changenode lookup
1542 # function for a changenode is identity.
1547 # function for a changenode is identity.
1543 def identity(x):
1548 def identity(x):
1544 return x
1549 return x
1545
1550
1546 # A function generating function. Sets up an environment for the
1551 # A function generating function. Sets up an environment for the
1547 # inner function.
1552 # inner function.
1548 def cmp_by_rev_func(revlog):
1553 def cmp_by_rev_func(revlog):
1549 # Compare two nodes by their revision number in the environment's
1554 # Compare two nodes by their revision number in the environment's
1550 # revision history. Since the revision number both represents the
1555 # revision history. Since the revision number both represents the
1551 # most efficient order to read the nodes in, and represents a
1556 # most efficient order to read the nodes in, and represents a
1552 # topological sorting of the nodes, this function is often useful.
1557 # topological sorting of the nodes, this function is often useful.
1553 def cmp_by_rev(a, b):
1558 def cmp_by_rev(a, b):
1554 return cmp(revlog.rev(a), revlog.rev(b))
1559 return cmp(revlog.rev(a), revlog.rev(b))
1555 return cmp_by_rev
1560 return cmp_by_rev
1556
1561
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            # Walk the known-present nodes in revision (topological) order so
            # ancestors are discovered before being revisited, and mark every
            # ancestor of a known node as known too.
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                # nullid parents carry no information; skip them.
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            # Anything the recipient is now known to have need not be sent.
            for n in hasset:
                msngset.pop(n, None)
1574
1579
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files touched by this changeset.
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                # c[0] is the manifest node; record the first changenode
                # that referenced it (its 'owner').
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files
1596
1601
        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            # Drop those manifests (and all their ancestors) from the
            # missing set.
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1610
1615
1611 # Use the information collected in collect_manifests_and_files to say
1616 # Use the information collected in collect_manifests_and_files to say
1612 # which changenode any manifestnode belongs to.
1617 # which changenode any manifestnode belongs to.
1613 def lookup_manifest_link(mnfstnode):
1618 def lookup_manifest_link(mnfstnode):
1614 return msng_mnfst_set[mnfstnode]
1619 return msng_mnfst_set[mnfstnode]
1615
1620
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # Mutable one-element cell so the closure can rebind the value;
            # holds the manifest revision we expect to see next.
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        # manifest lines store the hex node right after the
                        # NUL; 40 hex chars == 20-byte binary node
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1665
1670
        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            # Remove those filenodes and all their ancestors from msngset.
            prune_parents(filerevlog, hasset, msngset)
1679
1684
1680 # A function generator function that sets up the a context for the
1685 # A function generator function that sets up the a context for the
1681 # inner function.
1686 # inner function.
1682 def lookup_filenode_link_func(fname):
1687 def lookup_filenode_link_func(fname):
1683 msngset = msng_filenode_set[fname]
1688 msngset = msng_filenode_set[fname]
1684 # Lookup the changenode the filenode belongs to.
1689 # Lookup the changenode the filenode belongs to.
1685 def lookup_filenode_link(fnode):
1690 def lookup_filenode_link(fnode):
1686 return msngset[fnode]
1691 return msngset[fnode]
1687 return lookup_filenode_link
1692 return lookup_filenode_link
1688
1693
        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # Emits the changegroup as a stream of chunks in strict order:
            # changelog group first, then manifest group, then one group per
            # changed file (sorted by name), then a closing chunk.
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1749
1754
1750 if msng_cl_lst:
1755 if msng_cl_lst:
1751 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1756 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1752
1757
1753 return util.chunkbuffer(gengroup())
1758 return util.chunkbuffer(gengroup())
1754
1759
1755 def changegroup(self, basenodes, source):
1760 def changegroup(self, basenodes, source):
1756 """Generate a changegroup of all nodes that we have that a recipient
1761 """Generate a changegroup of all nodes that we have that a recipient
1757 doesn't.
1762 doesn't.
1758
1763
1759 This is much easier than the previous function as we can assume that
1764 This is much easier than the previous function as we can assume that
1760 the recipient has any changenode we aren't sending them."""
1765 the recipient has any changenode we aren't sending them."""
1761
1766
1762 self.hook('preoutgoing', throw=True, source=source)
1767 self.hook('preoutgoing', throw=True, source=source)
1763
1768
1764 cl = self.changelog
1769 cl = self.changelog
1765 nodes = cl.nodesbetween(basenodes, None)[0]
1770 nodes = cl.nodesbetween(basenodes, None)[0]
1766 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1771 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1767 self.changegroupinfo(nodes)
1772 self.changegroupinfo(nodes)
1768
1773
1769 def identity(x):
1774 def identity(x):
1770 return x
1775 return x
1771
1776
1772 def gennodelst(revlog):
1777 def gennodelst(revlog):
1773 for r in xrange(0, revlog.count()):
1778 for r in xrange(0, revlog.count()):
1774 n = revlog.node(r)
1779 n = revlog.node(r)
1775 if revlog.linkrev(n) in revset:
1780 if revlog.linkrev(n) in revset:
1776 yield n
1781 yield n
1777
1782
1778 def changed_file_collector(changedfileset):
1783 def changed_file_collector(changedfileset):
1779 def collect_changed_files(clnode):
1784 def collect_changed_files(clnode):
1780 c = cl.read(clnode)
1785 c = cl.read(clnode)
1781 for fname in c[3]:
1786 for fname in c[3]:
1782 changedfileset[fname] = 1
1787 changedfileset[fname] = 1
1783 return collect_changed_files
1788 return collect_changed_files
1784
1789
1785 def lookuprevlink_func(revlog):
1790 def lookuprevlink_func(revlog):
1786 def lookuprevlink(n):
1791 def lookuprevlink(n):
1787 return cl.node(revlog.linkrev(n))
1792 return cl.node(revlog.linkrev(n))
1788 return lookuprevlink
1793 return lookuprevlink
1789
1794
1790 def gengroup():
1795 def gengroup():
1791 # construct a list of all changed files
1796 # construct a list of all changed files
1792 changedfiles = {}
1797 changedfiles = {}
1793
1798
1794 for chnk in cl.group(nodes, identity,
1799 for chnk in cl.group(nodes, identity,
1795 changed_file_collector(changedfiles)):
1800 changed_file_collector(changedfiles)):
1796 yield chnk
1801 yield chnk
1797 changedfiles = changedfiles.keys()
1802 changedfiles = changedfiles.keys()
1798 changedfiles.sort()
1803 changedfiles.sort()
1799
1804
1800 mnfst = self.manifest
1805 mnfst = self.manifest
1801 nodeiter = gennodelst(mnfst)
1806 nodeiter = gennodelst(mnfst)
1802 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1807 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1803 yield chnk
1808 yield chnk
1804
1809
1805 for fname in changedfiles:
1810 for fname in changedfiles:
1806 filerevlog = self.file(fname)
1811 filerevlog = self.file(fname)
1807 nodeiter = gennodelst(filerevlog)
1812 nodeiter = gennodelst(filerevlog)
1808 nodeiter = list(nodeiter)
1813 nodeiter = list(nodeiter)
1809 if nodeiter:
1814 if nodeiter:
1810 yield changegroup.genchunk(fname)
1815 yield changegroup.genchunk(fname)
1811 lookup = lookuprevlink_func(filerevlog)
1816 lookup = lookuprevlink_func(filerevlog)
1812 for chnk in filerevlog.group(nodeiter, lookup):
1817 for chnk in filerevlog.group(nodeiter, lookup):
1813 yield chnk
1818 yield chnk
1814
1819
1815 yield changegroup.closechunk()
1820 yield changegroup.closechunk()
1816
1821
1817 if nodes:
1822 if nodes:
1818 self.hook('outgoing', node=hex(nodes[0]), source=source)
1823 self.hook('outgoing', node=hex(nodes[0]), source=source)
1819
1824
1820 return util.chunkbuffer(gengroup())
1825 return util.chunkbuffer(gengroup())
1821
1826
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source is a chunked changegroup stream; srctype names where it came
        from (e.g. 'push', 'pull') and url identifies the peer; both are
        passed through to the hooks fired here.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # Progress callback used while adding changesets; returns the
            # revision number the incoming changeset will receive.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its revision number (the link revision
            # used by manifest and file revlog entries).
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog rev count before/after the group is added
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file list
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always release the appendfile, even on error, so the real
            # changelog can be reopened below
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may raise and thereby abort the transaction
            # before it is committed below
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1928
1933
1929
1934
1930 def stream_in(self, remote):
1935 def stream_in(self, remote):
1931 fp = remote.stream_out()
1936 fp = remote.stream_out()
1932 l = fp.readline()
1937 l = fp.readline()
1933 try:
1938 try:
1934 resp = int(l)
1939 resp = int(l)
1935 except ValueError:
1940 except ValueError:
1936 raise util.UnexpectedOutput(
1941 raise util.UnexpectedOutput(
1937 _('Unexpected response from remote server:'), l)
1942 _('Unexpected response from remote server:'), l)
1938 if resp == 1:
1943 if resp == 1:
1939 raise util.Abort(_('operation forbidden by server'))
1944 raise util.Abort(_('operation forbidden by server'))
1940 elif resp == 2:
1945 elif resp == 2:
1941 raise util.Abort(_('locking the remote repository failed'))
1946 raise util.Abort(_('locking the remote repository failed'))
1942 elif resp != 0:
1947 elif resp != 0:
1943 raise util.Abort(_('the server sent an unknown error code'))
1948 raise util.Abort(_('the server sent an unknown error code'))
1944 self.ui.status(_('streaming all changes\n'))
1949 self.ui.status(_('streaming all changes\n'))
1945 l = fp.readline()
1950 l = fp.readline()
1946 try:
1951 try:
1947 total_files, total_bytes = map(int, l.split(' ', 1))
1952 total_files, total_bytes = map(int, l.split(' ', 1))
1948 except ValueError, TypeError:
1953 except ValueError, TypeError:
1949 raise util.UnexpectedOutput(
1954 raise util.UnexpectedOutput(
1950 _('Unexpected response from remote server:'), l)
1955 _('Unexpected response from remote server:'), l)
1951 self.ui.status(_('%d files to transfer, %s of data\n') %
1956 self.ui.status(_('%d files to transfer, %s of data\n') %
1952 (total_files, util.bytecount(total_bytes)))
1957 (total_files, util.bytecount(total_bytes)))
1953 start = time.time()
1958 start = time.time()
1954 for i in xrange(total_files):
1959 for i in xrange(total_files):
1955 # XXX doesn't support '\n' or '\r' in filenames
1960 # XXX doesn't support '\n' or '\r' in filenames
1956 l = fp.readline()
1961 l = fp.readline()
1957 try:
1962 try:
1958 name, size = l.split('\0', 1)
1963 name, size = l.split('\0', 1)
1959 size = int(size)
1964 size = int(size)
1960 except ValueError, TypeError:
1965 except ValueError, TypeError:
1961 raise util.UnexpectedOutput(
1966 raise util.UnexpectedOutput(
1962 _('Unexpected response from remote server:'), l)
1967 _('Unexpected response from remote server:'), l)
1963 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1968 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1964 ofp = self.sopener(name, 'w')
1969 ofp = self.sopener(name, 'w')
1965 for chunk in util.filechunkiter(fp, limit=size):
1970 for chunk in util.filechunkiter(fp, limit=size):
1966 ofp.write(chunk)
1971 ofp.write(chunk)
1967 ofp.close()
1972 ofp.close()
1968 elapsed = time.time() - start
1973 elapsed = time.time() - start
1969 if elapsed <= 0:
1974 if elapsed <= 0:
1970 elapsed = 0.001
1975 elapsed = 0.001
1971 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1976 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1972 (util.bytecount(total_bytes), elapsed,
1977 (util.bytecount(total_bytes), elapsed,
1973 util.bytecount(total_bytes / elapsed)))
1978 util.bytecount(total_bytes / elapsed)))
1974 self.reload()
1979 self.reload()
1975 return len(self.heads()) + 1
1980 return len(self.heads()) + 1
1976
1981
1977 def clone(self, remote, heads=[], stream=False):
1982 def clone(self, remote, heads=[], stream=False):
1978 '''clone remote repository.
1983 '''clone remote repository.
1979
1984
1980 keyword arguments:
1985 keyword arguments:
1981 heads: list of revs to clone (forces use of pull)
1986 heads: list of revs to clone (forces use of pull)
1982 stream: use streaming clone if possible'''
1987 stream: use streaming clone if possible'''
1983
1988
1984 # now, all clients that can request uncompressed clones can
1989 # now, all clients that can request uncompressed clones can
1985 # read repo formats supported by all servers that can serve
1990 # read repo formats supported by all servers that can serve
1986 # them.
1991 # them.
1987
1992
1988 # if revlog format changes, client will have to check version
1993 # if revlog format changes, client will have to check version
1989 # and format flags on "stream" capability, and use
1994 # and format flags on "stream" capability, and use
1990 # uncompressed only if compatible.
1995 # uncompressed only if compatible.
1991
1996
1992 if stream and not heads and remote.capable('stream'):
1997 if stream and not heads and remote.capable('stream'):
1993 return self.stream_in(remote)
1998 return self.stream_in(remote)
1994 return self.pull(remote, heads)
1999 return self.pull(remote, heads)
1995
2000
# used to avoid circular references so destructors work
def aftertrans(files):
    # Snapshot the (src, dest) pairs now; the returned callback performs
    # the renames later without keeping a reference back to the caller.
    renamefiles = [tuple(pair) for pair in files]
    def do_renames():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return do_renames
2003
2008
def instance(ui, path, create):
    # Strip any leading 'file:' scheme before handing the path to the repo.
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2006
2011
def islocal(path):
    """Report whether this repository class is local; always true here."""
    return True
@@ -1,27 +1,27 b''
1 adding a
1 adding a
2 adding b
2 adding b
3 adding t.h
3 adding t.h
4 adding t/x
4 adding t/x
5 a
5 a
6 NONEXISTENT: No such file or directory
6 NONEXISTENT: No such file or directory
7 a
7 a
8 b
8 b
9 t.h
9 t.h
10 t/x
10 t/x
11 a: No such file or directory
11 a: No such file or directory
12 NONEXISTENT: No such file or directory
12 NONEXISTENT: No such file or directory
13 b
13 b
14 t.h
14 t.h
15 t/x
15 t/x
16 a
16 a
17 NONEXISTENT: No such file in rev ce18e5bc5cd3
17 NONEXISTENT: No such file in rev ce18e5bc5cd3
18 a
18 a
19 t/x
20 b
19 b
21 t.h
20 t.h
21 t/x
22 % -I/-X with relative path should work
22 % -I/-X with relative path should work
23 b
23 b
24 t.h
24 t.h
25 t/x
25 t/x
26 t/x
26 t/x
27 t/x
27 t/x
General Comments 0
You need to be logged in to leave comments. Login now