##// END OF EJS Templates
Add a features list to branches.cache to detect caches of old hg versions....
Thomas Arendsen Hein -
r4168:bbfe5a3f default
parent child Browse files
Show More
@@ -1,1990 +1,2008
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1', 'store')
19 supported = ('revlogv1', 'store')
20 branchcache_features = ('unnamed',)
20
21
21 def __del__(self):
22 def __del__(self):
22 self.transhandle = None
23 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
24 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
25 repo.repository.__init__(self)
25 if not path:
26 if not path:
26 p = os.getcwd()
27 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
29 oldp = p
29 p = os.path.dirname(p)
30 p = os.path.dirname(p)
30 if p == oldp:
31 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
32 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
33 " here (.hg not found)"))
33 path = p
34 path = p
34
35
35 self.path = os.path.join(path, ".hg")
36 self.path = os.path.join(path, ".hg")
36 self.root = os.path.realpath(path)
37 self.root = os.path.realpath(path)
37 self.origroot = path
38 self.origroot = path
38 self.opener = util.opener(self.path)
39 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
40 self.wopener = util.opener(self.root)
40
41
41 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
42 if create:
43 if create:
43 if not os.path.exists(path):
44 if not os.path.exists(path):
44 os.mkdir(path)
45 os.mkdir(path)
45 os.mkdir(self.path)
46 os.mkdir(self.path)
46 os.mkdir(os.path.join(self.path, "store"))
47 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
48 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
49 reqfile = self.opener("requires", "w")
49 for r in requirements:
50 for r in requirements:
50 reqfile.write("%s\n" % r)
51 reqfile.write("%s\n" % r)
51 reqfile.close()
52 reqfile.close()
52 # create an invalid changelog
53 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
56 )
57 )
57 else:
58 else:
58 raise repo.RepoError(_("repository %s not found") % path)
59 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
60 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
61 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
62 else:
62 # find requirements
63 # find requirements
63 try:
64 try:
64 requirements = self.opener("requires").read().splitlines()
65 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
66 except IOError, inst:
66 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
67 raise
68 raise
68 requirements = []
69 requirements = []
69 # check them
70 # check them
70 for r in requirements:
71 for r in requirements:
71 if r not in self.supported:
72 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
74
74 # setup store
75 # setup store
75 if "store" in requirements:
76 if "store" in requirements:
76 self.encodefn = util.encodefilename
77 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
78 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
79 self.spath = os.path.join(self.path, "store")
79 else:
80 else:
80 self.encodefn = lambda x: x
81 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.decodefn = lambda x: x
82 self.spath = self.path
83 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
85
85 self.ui = ui.ui(parentui=parentui)
86 self.ui = ui.ui(parentui=parentui)
86 try:
87 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
89 except IOError:
89 pass
90 pass
90
91
91 v = self.ui.configrevlog()
92 v = self.ui.configrevlog()
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 fl = v.get('flags', None)
95 fl = v.get('flags', None)
95 flags = 0
96 flags = 0
96 if fl != None:
97 if fl != None:
97 for x in fl.split():
98 for x in fl.split():
98 flags |= revlog.flagstr(x)
99 flags |= revlog.flagstr(x)
99 elif self.revlogv1:
100 elif self.revlogv1:
100 flags = revlog.REVLOG_DEFAULT_FLAGS
101 flags = revlog.REVLOG_DEFAULT_FLAGS
101
102
102 v = self.revlogversion | flags
103 v = self.revlogversion | flags
103 self.manifest = manifest.manifest(self.sopener, v)
104 self.manifest = manifest.manifest(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
105 self.changelog = changelog.changelog(self.sopener, v)
105
106
106 fallback = self.ui.config('ui', 'fallbackencoding')
107 fallback = self.ui.config('ui', 'fallbackencoding')
107 if fallback:
108 if fallback:
108 util._fallbackencoding = fallback
109 util._fallbackencoding = fallback
109
110
110 # the changelog might not have the inline index flag
111 # the changelog might not have the inline index flag
111 # on. If the format of the changelog is the same as found in
112 # on. If the format of the changelog is the same as found in
112 # .hgrc, apply any flags found in the .hgrc as well.
113 # .hgrc, apply any flags found in the .hgrc as well.
113 # Otherwise, just version from the changelog
114 # Otherwise, just version from the changelog
114 v = self.changelog.version
115 v = self.changelog.version
115 if v == self.revlogversion:
116 if v == self.revlogversion:
116 v |= flags
117 v |= flags
117 self.revlogversion = v
118 self.revlogversion = v
118
119
119 self.tagscache = None
120 self.tagscache = None
120 self.branchcache = None
121 self.branchcache = None
121 self.nodetagscache = None
122 self.nodetagscache = None
122 self.encodepats = None
123 self.encodepats = None
123 self.decodepats = None
124 self.decodepats = None
124 self.transhandle = None
125 self.transhandle = None
125
126
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127
128
128 def url(self):
129 def url(self):
129 return 'file:' + self.root
130 return 'file:' + self.root
130
131
131 def hook(self, name, throw=False, **args):
132 def hook(self, name, throw=False, **args):
132 def callhook(hname, funcname):
133 def callhook(hname, funcname):
133 '''call python hook. hook is callable object, looked up as
134 '''call python hook. hook is callable object, looked up as
134 name in python module. if callable returns "true", hook
135 name in python module. if callable returns "true", hook
135 fails, else passes. if hook raises exception, treated as
136 fails, else passes. if hook raises exception, treated as
136 hook failure. exception propagates if throw is "true".
137 hook failure. exception propagates if throw is "true".
137
138
138 reason for "true" meaning "hook failed" is so that
139 reason for "true" meaning "hook failed" is so that
139 unmodified commands (e.g. mercurial.commands.update) can
140 unmodified commands (e.g. mercurial.commands.update) can
140 be run as hooks without wrappers to convert return values.'''
141 be run as hooks without wrappers to convert return values.'''
141
142
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 d = funcname.rfind('.')
144 d = funcname.rfind('.')
144 if d == -1:
145 if d == -1:
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 % (hname, funcname))
147 % (hname, funcname))
147 modname = funcname[:d]
148 modname = funcname[:d]
148 try:
149 try:
149 obj = __import__(modname)
150 obj = __import__(modname)
150 except ImportError:
151 except ImportError:
151 try:
152 try:
152 # extensions are loaded with hgext_ prefix
153 # extensions are loaded with hgext_ prefix
153 obj = __import__("hgext_%s" % modname)
154 obj = __import__("hgext_%s" % modname)
154 except ImportError:
155 except ImportError:
155 raise util.Abort(_('%s hook is invalid '
156 raise util.Abort(_('%s hook is invalid '
156 '(import of "%s" failed)') %
157 '(import of "%s" failed)') %
157 (hname, modname))
158 (hname, modname))
158 try:
159 try:
159 for p in funcname.split('.')[1:]:
160 for p in funcname.split('.')[1:]:
160 obj = getattr(obj, p)
161 obj = getattr(obj, p)
161 except AttributeError, err:
162 except AttributeError, err:
162 raise util.Abort(_('%s hook is invalid '
163 raise util.Abort(_('%s hook is invalid '
163 '("%s" is not defined)') %
164 '("%s" is not defined)') %
164 (hname, funcname))
165 (hname, funcname))
165 if not callable(obj):
166 if not callable(obj):
166 raise util.Abort(_('%s hook is invalid '
167 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not callable)') %
168 '("%s" is not callable)') %
168 (hname, funcname))
169 (hname, funcname))
169 try:
170 try:
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 except (KeyboardInterrupt, util.SignalInterrupt):
172 except (KeyboardInterrupt, util.SignalInterrupt):
172 raise
173 raise
173 except Exception, exc:
174 except Exception, exc:
174 if isinstance(exc, util.Abort):
175 if isinstance(exc, util.Abort):
175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 self.ui.warn(_('error: %s hook failed: %s\n') %
176 (hname, exc.args[0]))
177 (hname, exc.args[0]))
177 else:
178 else:
178 self.ui.warn(_('error: %s hook raised an exception: '
179 self.ui.warn(_('error: %s hook raised an exception: '
179 '%s\n') % (hname, exc))
180 '%s\n') % (hname, exc))
180 if throw:
181 if throw:
181 raise
182 raise
182 self.ui.print_exc()
183 self.ui.print_exc()
183 return True
184 return True
184 if r:
185 if r:
185 if throw:
186 if throw:
186 raise util.Abort(_('%s hook failed') % hname)
187 raise util.Abort(_('%s hook failed') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 return r
189 return r
189
190
190 def runhook(name, cmd):
191 def runhook(name, cmd):
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 r = util.system(cmd, environ=env, cwd=self.root)
194 r = util.system(cmd, environ=env, cwd=self.root)
194 if r:
195 if r:
195 desc, r = util.explain_exit(r)
196 desc, r = util.explain_exit(r)
196 if throw:
197 if throw:
197 raise util.Abort(_('%s hook %s') % (name, desc))
198 raise util.Abort(_('%s hook %s') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 return r
200 return r
200
201
201 r = False
202 r = False
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 if hname.split(".", 1)[0] == name and cmd]
204 if hname.split(".", 1)[0] == name and cmd]
204 hooks.sort()
205 hooks.sort()
205 for hname, cmd in hooks:
206 for hname, cmd in hooks:
206 if cmd.startswith('python:'):
207 if cmd.startswith('python:'):
207 r = callhook(hname, cmd[7:].strip()) or r
208 r = callhook(hname, cmd[7:].strip()) or r
208 else:
209 else:
209 r = runhook(hname, cmd) or r
210 r = runhook(hname, cmd) or r
210 return r
211 return r
211
212
212 tag_disallowed = ':\r\n'
213 tag_disallowed = ':\r\n'
213
214
214 def tag(self, name, node, message, local, user, date):
215 def tag(self, name, node, message, local, user, date):
215 '''tag a revision with a symbolic name.
216 '''tag a revision with a symbolic name.
216
217
217 if local is True, the tag is stored in a per-repository file.
218 if local is True, the tag is stored in a per-repository file.
218 otherwise, it is stored in the .hgtags file, and a new
219 otherwise, it is stored in the .hgtags file, and a new
219 changeset is committed with the change.
220 changeset is committed with the change.
220
221
221 keyword arguments:
222 keyword arguments:
222
223
223 local: whether to store tag in non-version-controlled file
224 local: whether to store tag in non-version-controlled file
224 (default False)
225 (default False)
225
226
226 message: commit message to use if committing
227 message: commit message to use if committing
227
228
228 user: name of user to use if committing
229 user: name of user to use if committing
229
230
230 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
231
232
232 for c in self.tag_disallowed:
233 for c in self.tag_disallowed:
233 if c in name:
234 if c in name:
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 raise util.Abort(_('%r cannot be used in a tag name') % c)
235
236
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237
238
238 if local:
239 if local:
239 # local tags are stored in the current charset
240 # local tags are stored in the current charset
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.hook('tag', node=hex(node), tag=name, local=local)
242 self.hook('tag', node=hex(node), tag=name, local=local)
242 return
243 return
243
244
244 for x in self.status()[:5]:
245 for x in self.status()[:5]:
245 if '.hgtags' in x:
246 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
247 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
248 '(please commit .hgtags manually)'))
248
249
249 # committed tags are stored in UTF-8
250 # committed tags are stored in UTF-8
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 self.wfile('.hgtags', 'ab').write(line)
252 self.wfile('.hgtags', 'ab').write(line)
252 if self.dirstate.state('.hgtags') == '?':
253 if self.dirstate.state('.hgtags') == '?':
253 self.add(['.hgtags'])
254 self.add(['.hgtags'])
254
255
255 self.commit(['.hgtags'], message, user, date)
256 self.commit(['.hgtags'], message, user, date)
256 self.hook('tag', node=hex(node), tag=name, local=local)
257 self.hook('tag', node=hex(node), tag=name, local=local)
257
258
258 def tags(self):
259 def tags(self):
259 '''return a mapping of tag to node'''
260 '''return a mapping of tag to node'''
260 if not self.tagscache:
261 if not self.tagscache:
261 self.tagscache = {}
262 self.tagscache = {}
262
263
263 def parsetag(line, context):
264 def parsetag(line, context):
264 if not line:
265 if not line:
265 return
266 return
266 s = l.split(" ", 1)
267 s = l.split(" ", 1)
267 if len(s) != 2:
268 if len(s) != 2:
268 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 return
270 return
270 node, key = s
271 node, key = s
271 key = util.tolocal(key.strip()) # stored in UTF-8
272 key = util.tolocal(key.strip()) # stored in UTF-8
272 try:
273 try:
273 bin_n = bin(node)
274 bin_n = bin(node)
274 except TypeError:
275 except TypeError:
275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 (context, node))
277 (context, node))
277 return
278 return
278 if bin_n not in self.changelog.nodemap:
279 if bin_n not in self.changelog.nodemap:
279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 (context, key))
281 (context, key))
281 return
282 return
282 self.tagscache[key] = bin_n
283 self.tagscache[key] = bin_n
283
284
284 # read the tags file from each head, ending with the tip,
285 # read the tags file from each head, ending with the tip,
285 # and add each tag found to the map, with "newer" ones
286 # and add each tag found to the map, with "newer" ones
286 # taking precedence
287 # taking precedence
287 f = None
288 f = None
288 for rev, node, fnode in self._hgtagsnodes():
289 for rev, node, fnode in self._hgtagsnodes():
289 f = (f and f.filectx(fnode) or
290 f = (f and f.filectx(fnode) or
290 self.filectx('.hgtags', fileid=fnode))
291 self.filectx('.hgtags', fileid=fnode))
291 count = 0
292 count = 0
292 for l in f.data().splitlines():
293 for l in f.data().splitlines():
293 count += 1
294 count += 1
294 parsetag(l, _("%s, line %d") % (str(f), count))
295 parsetag(l, _("%s, line %d") % (str(f), count))
295
296
296 try:
297 try:
297 f = self.opener("localtags")
298 f = self.opener("localtags")
298 count = 0
299 count = 0
299 for l in f:
300 for l in f:
300 # localtags are stored in the local character set
301 # localtags are stored in the local character set
301 # while the internal tag table is stored in UTF-8
302 # while the internal tag table is stored in UTF-8
302 l = util.fromlocal(l)
303 l = util.fromlocal(l)
303 count += 1
304 count += 1
304 parsetag(l, _("localtags, line %d") % count)
305 parsetag(l, _("localtags, line %d") % count)
305 except IOError:
306 except IOError:
306 pass
307 pass
307
308
308 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
309
310
310 return self.tagscache
311 return self.tagscache
311
312
312 def _hgtagsnodes(self):
313 def _hgtagsnodes(self):
313 heads = self.heads()
314 heads = self.heads()
314 heads.reverse()
315 heads.reverse()
315 last = {}
316 last = {}
316 ret = []
317 ret = []
317 for node in heads:
318 for node in heads:
318 c = self.changectx(node)
319 c = self.changectx(node)
319 rev = c.rev()
320 rev = c.rev()
320 try:
321 try:
321 fnode = c.filenode('.hgtags')
322 fnode = c.filenode('.hgtags')
322 except repo.LookupError:
323 except repo.LookupError:
323 continue
324 continue
324 ret.append((rev, node, fnode))
325 ret.append((rev, node, fnode))
325 if fnode in last:
326 if fnode in last:
326 ret[last[fnode]] = None
327 ret[last[fnode]] = None
327 last[fnode] = len(ret) - 1
328 last[fnode] = len(ret) - 1
328 return [item for item in ret if item]
329 return [item for item in ret if item]
329
330
330 def tagslist(self):
331 def tagslist(self):
331 '''return a list of tags ordered by revision'''
332 '''return a list of tags ordered by revision'''
332 l = []
333 l = []
333 for t, n in self.tags().items():
334 for t, n in self.tags().items():
334 try:
335 try:
335 r = self.changelog.rev(n)
336 r = self.changelog.rev(n)
336 except:
337 except:
337 r = -2 # sort to the beginning of the list if unknown
338 r = -2 # sort to the beginning of the list if unknown
338 l.append((r, t, n))
339 l.append((r, t, n))
339 l.sort()
340 l.sort()
340 return [(t, n) for r, t, n in l]
341 return [(t, n) for r, t, n in l]
341
342
342 def nodetags(self, node):
343 def nodetags(self, node):
343 '''return the tags associated with a node'''
344 '''return the tags associated with a node'''
344 if not self.nodetagscache:
345 if not self.nodetagscache:
345 self.nodetagscache = {}
346 self.nodetagscache = {}
346 for t, n in self.tags().items():
347 for t, n in self.tags().items():
347 self.nodetagscache.setdefault(n, []).append(t)
348 self.nodetagscache.setdefault(n, []).append(t)
348 return self.nodetagscache.get(node, [])
349 return self.nodetagscache.get(node, [])
349
350
350 def _branchtags(self):
351 def _branchtags(self):
351 partial, last, lrev = self._readbranchcache()
352 partial, last, lrev = self._readbranchcache()
352
353
353 tiprev = self.changelog.count() - 1
354 tiprev = self.changelog.count() - 1
354 if lrev != tiprev:
355 if lrev != tiprev:
355 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357
358
358 return partial
359 return partial
359
360
360 def branchtags(self):
361 def branchtags(self):
361 if self.branchcache is not None:
362 if self.branchcache is not None:
362 return self.branchcache
363 return self.branchcache
363
364
364 self.branchcache = {} # avoid recursion in changectx
365 self.branchcache = {} # avoid recursion in changectx
365 partial = self._branchtags()
366 partial = self._branchtags()
366
367
367 # the branch cache is stored on disk as UTF-8, but in the local
368 # the branch cache is stored on disk as UTF-8, but in the local
368 # charset internally
369 # charset internally
369 for k, v in partial.items():
370 for k, v in partial.items():
370 self.branchcache[util.tolocal(k)] = v
371 self.branchcache[util.tolocal(k)] = v
371 return self.branchcache
372 return self.branchcache
372
373
373 def _readbranchcache(self):
374 def _readbranchcache(self):
374 partial = {}
375 partial = {}
375 try:
376 try:
376 f = self.opener("branches.cache")
377 f = self.opener("branches.cache")
377 lines = f.read().split('\n')
378 lines = f.read().split('\n')
378 f.close()
379 f.close()
380 features = lines.pop(0).strip()
381 if not features.startswith('features: '):
382 raise ValueError(_('branch cache: no features specified'))
383 features = features.split(' ', 1)[1].split()
384 missing_features = []
385 for feature in self.branchcache_features:
386 try:
387 features.remove(feature)
388 except ValueError, inst:
389 missing_features.append(feature)
390 if missing_features:
391 raise ValueError(_('branch cache: missing features: %s')
392 % ', '.join(missing_features))
393 if features:
394 raise ValueError(_('branch cache: unknown features: %s')
395 % ', '.join(features))
379 last, lrev = lines.pop(0).split(" ", 1)
396 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = bin(last), int(lrev)
397 last, lrev = bin(last), int(lrev)
381 if not (lrev < self.changelog.count() and
398 if not (lrev < self.changelog.count() and
382 self.changelog.node(lrev) == last): # sanity check
399 self.changelog.node(lrev) == last): # sanity check
383 # invalidate the cache
400 # invalidate the cache
384 raise ValueError('Invalid branch cache: unknown tip')
401 raise ValueError('Invalid branch cache: unknown tip')
385 for l in lines:
402 for l in lines:
386 if not l: continue
403 if not l: continue
387 node, label = l.split(" ", 1)
404 node, label = l.split(" ", 1)
388 partial[label.strip()] = bin(node)
405 partial[label.strip()] = bin(node)
389 except (KeyboardInterrupt, util.SignalInterrupt):
406 except (KeyboardInterrupt, util.SignalInterrupt):
390 raise
407 raise
391 except Exception, inst:
408 except Exception, inst:
392 if self.ui.debugflag:
409 if self.ui.debugflag:
393 self.ui.warn(str(inst), '\n')
410 self.ui.warn(str(inst), '\n')
394 partial, last, lrev = {}, nullid, nullrev
411 partial, last, lrev = {}, nullid, nullrev
395 return partial, last, lrev
412 return partial, last, lrev
396
413
397 def _writebranchcache(self, branches, tip, tiprev):
414 def _writebranchcache(self, branches, tip, tiprev):
398 try:
415 try:
399 f = self.opener("branches.cache", "w")
416 f = self.opener("branches.cache", "w")
417 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
400 f.write("%s %s\n" % (hex(tip), tiprev))
418 f.write("%s %s\n" % (hex(tip), tiprev))
401 for label, node in branches.iteritems():
419 for label, node in branches.iteritems():
402 f.write("%s %s\n" % (hex(node), label))
420 f.write("%s %s\n" % (hex(node), label))
403 except IOError:
421 except IOError:
404 pass
422 pass
405
423
406 def _updatebranchcache(self, partial, start, end):
424 def _updatebranchcache(self, partial, start, end):
407 for r in xrange(start, end):
425 for r in xrange(start, end):
408 c = self.changectx(r)
426 c = self.changectx(r)
409 b = c.branch()
427 b = c.branch()
410 partial[b] = c.node()
428 partial[b] = c.node()
411
429
412 def lookup(self, key):
430 def lookup(self, key):
413 if key == '.':
431 if key == '.':
414 key = self.dirstate.parents()[0]
432 key = self.dirstate.parents()[0]
415 if key == nullid:
433 if key == nullid:
416 raise repo.RepoError(_("no revision checked out"))
434 raise repo.RepoError(_("no revision checked out"))
417 elif key == 'null':
435 elif key == 'null':
418 return nullid
436 return nullid
419 n = self.changelog._match(key)
437 n = self.changelog._match(key)
420 if n:
438 if n:
421 return n
439 return n
422 if key in self.tags():
440 if key in self.tags():
423 return self.tags()[key]
441 return self.tags()[key]
424 if key in self.branchtags():
442 if key in self.branchtags():
425 return self.branchtags()[key]
443 return self.branchtags()[key]
426 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
427 if n:
445 if n:
428 return n
446 return n
429 raise repo.RepoError(_("unknown revision '%s'") % key)
447 raise repo.RepoError(_("unknown revision '%s'") % key)
430
448
431 def dev(self):
449 def dev(self):
432 return os.lstat(self.path).st_dev
450 return os.lstat(self.path).st_dev
433
451
434 def local(self):
452 def local(self):
435 return True
453 return True
436
454
437 def join(self, f):
455 def join(self, f):
438 return os.path.join(self.path, f)
456 return os.path.join(self.path, f)
439
457
440 def sjoin(self, f):
458 def sjoin(self, f):
441 f = self.encodefn(f)
459 f = self.encodefn(f)
442 return os.path.join(self.spath, f)
460 return os.path.join(self.spath, f)
443
461
444 def wjoin(self, f):
462 def wjoin(self, f):
445 return os.path.join(self.root, f)
463 return os.path.join(self.root, f)
446
464
447 def file(self, f):
465 def file(self, f):
448 if f[0] == '/':
466 if f[0] == '/':
449 f = f[1:]
467 f = f[1:]
450 return filelog.filelog(self.sopener, f, self.revlogversion)
468 return filelog.filelog(self.sopener, f, self.revlogversion)
451
469
452 def changectx(self, changeid=None):
470 def changectx(self, changeid=None):
453 return context.changectx(self, changeid)
471 return context.changectx(self, changeid)
454
472
455 def workingctx(self):
473 def workingctx(self):
456 return context.workingctx(self)
474 return context.workingctx(self)
457
475
458 def parents(self, changeid=None):
476 def parents(self, changeid=None):
459 '''
477 '''
460 get list of changectxs for parents of changeid or working directory
478 get list of changectxs for parents of changeid or working directory
461 '''
479 '''
462 if changeid is None:
480 if changeid is None:
463 pl = self.dirstate.parents()
481 pl = self.dirstate.parents()
464 else:
482 else:
465 n = self.changelog.lookup(changeid)
483 n = self.changelog.lookup(changeid)
466 pl = self.changelog.parents(n)
484 pl = self.changelog.parents(n)
467 if pl[1] == nullid:
485 if pl[1] == nullid:
468 return [self.changectx(pl[0])]
486 return [self.changectx(pl[0])]
469 return [self.changectx(pl[0]), self.changectx(pl[1])]
487 return [self.changectx(pl[0]), self.changectx(pl[1])]
470
488
471 def filectx(self, path, changeid=None, fileid=None):
489 def filectx(self, path, changeid=None, fileid=None):
472 """changeid can be a changeset revision, node, or tag.
490 """changeid can be a changeset revision, node, or tag.
473 fileid can be a file revision or node."""
491 fileid can be a file revision or node."""
474 return context.filectx(self, path, changeid, fileid)
492 return context.filectx(self, path, changeid, fileid)
475
493
476 def getcwd(self):
494 def getcwd(self):
477 return self.dirstate.getcwd()
495 return self.dirstate.getcwd()
478
496
479 def wfile(self, f, mode='r'):
497 def wfile(self, f, mode='r'):
480 return self.wopener(f, mode)
498 return self.wopener(f, mode)
481
499
482 def wread(self, filename):
500 def wread(self, filename):
483 if self.encodepats == None:
501 if self.encodepats == None:
484 l = []
502 l = []
485 for pat, cmd in self.ui.configitems("encode"):
503 for pat, cmd in self.ui.configitems("encode"):
486 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
487 l.append((mf, cmd))
505 l.append((mf, cmd))
488 self.encodepats = l
506 self.encodepats = l
489
507
490 data = self.wopener(filename, 'r').read()
508 data = self.wopener(filename, 'r').read()
491
509
492 for mf, cmd in self.encodepats:
510 for mf, cmd in self.encodepats:
493 if mf(filename):
511 if mf(filename):
494 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 data = util.filter(data, cmd)
513 data = util.filter(data, cmd)
496 break
514 break
497
515
498 return data
516 return data
499
517
500 def wwrite(self, filename, data, fd=None):
518 def wwrite(self, filename, data, fd=None):
501 if self.decodepats == None:
519 if self.decodepats == None:
502 l = []
520 l = []
503 for pat, cmd in self.ui.configitems("decode"):
521 for pat, cmd in self.ui.configitems("decode"):
504 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 l.append((mf, cmd))
523 l.append((mf, cmd))
506 self.decodepats = l
524 self.decodepats = l
507
525
508 for mf, cmd in self.decodepats:
526 for mf, cmd in self.decodepats:
509 if mf(filename):
527 if mf(filename):
510 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
511 data = util.filter(data, cmd)
529 data = util.filter(data, cmd)
512 break
530 break
513
531
514 if fd:
532 if fd:
515 return fd.write(data)
533 return fd.write(data)
516 return self.wopener(filename, 'w').write(data)
534 return self.wopener(filename, 'w').write(data)
517
535
518 def transaction(self):
536 def transaction(self):
519 tr = self.transhandle
537 tr = self.transhandle
520 if tr != None and tr.running():
538 if tr != None and tr.running():
521 return tr.nest()
539 return tr.nest()
522
540
523 # save dirstate for rollback
541 # save dirstate for rollback
524 try:
542 try:
525 ds = self.opener("dirstate").read()
543 ds = self.opener("dirstate").read()
526 except IOError:
544 except IOError:
527 ds = ""
545 ds = ""
528 self.opener("journal.dirstate", "w").write(ds)
546 self.opener("journal.dirstate", "w").write(ds)
529
547
530 renames = [(self.sjoin("journal"), self.sjoin("undo")),
548 renames = [(self.sjoin("journal"), self.sjoin("undo")),
531 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
549 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
532 tr = transaction.transaction(self.ui.warn, self.sopener,
550 tr = transaction.transaction(self.ui.warn, self.sopener,
533 self.sjoin("journal"),
551 self.sjoin("journal"),
534 aftertrans(renames))
552 aftertrans(renames))
535 self.transhandle = tr
553 self.transhandle = tr
536 return tr
554 return tr
537
555
538 def recover(self):
556 def recover(self):
539 l = self.lock()
557 l = self.lock()
540 if os.path.exists(self.sjoin("journal")):
558 if os.path.exists(self.sjoin("journal")):
541 self.ui.status(_("rolling back interrupted transaction\n"))
559 self.ui.status(_("rolling back interrupted transaction\n"))
542 transaction.rollback(self.sopener, self.sjoin("journal"))
560 transaction.rollback(self.sopener, self.sjoin("journal"))
543 self.reload()
561 self.reload()
544 return True
562 return True
545 else:
563 else:
546 self.ui.warn(_("no interrupted transaction available\n"))
564 self.ui.warn(_("no interrupted transaction available\n"))
547 return False
565 return False
548
566
549 def rollback(self, wlock=None):
567 def rollback(self, wlock=None):
550 if not wlock:
568 if not wlock:
551 wlock = self.wlock()
569 wlock = self.wlock()
552 l = self.lock()
570 l = self.lock()
553 if os.path.exists(self.sjoin("undo")):
571 if os.path.exists(self.sjoin("undo")):
554 self.ui.status(_("rolling back last transaction\n"))
572 self.ui.status(_("rolling back last transaction\n"))
555 transaction.rollback(self.sopener, self.sjoin("undo"))
573 transaction.rollback(self.sopener, self.sjoin("undo"))
556 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
574 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
557 self.reload()
575 self.reload()
558 self.wreload()
576 self.wreload()
559 else:
577 else:
560 self.ui.warn(_("no rollback information available\n"))
578 self.ui.warn(_("no rollback information available\n"))
561
579
562 def wreload(self):
580 def wreload(self):
563 self.dirstate.read()
581 self.dirstate.read()
564
582
565 def reload(self):
583 def reload(self):
566 self.changelog.load()
584 self.changelog.load()
567 self.manifest.load()
585 self.manifest.load()
568 self.tagscache = None
586 self.tagscache = None
569 self.nodetagscache = None
587 self.nodetagscache = None
570
588
571 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
589 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
572 desc=None):
590 desc=None):
573 try:
591 try:
574 l = lock.lock(lockname, 0, releasefn, desc=desc)
592 l = lock.lock(lockname, 0, releasefn, desc=desc)
575 except lock.LockHeld, inst:
593 except lock.LockHeld, inst:
576 if not wait:
594 if not wait:
577 raise
595 raise
578 self.ui.warn(_("waiting for lock on %s held by %r\n") %
596 self.ui.warn(_("waiting for lock on %s held by %r\n") %
579 (desc, inst.locker))
597 (desc, inst.locker))
580 # default to 600 seconds timeout
598 # default to 600 seconds timeout
581 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
599 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
582 releasefn, desc=desc)
600 releasefn, desc=desc)
583 if acquirefn:
601 if acquirefn:
584 acquirefn()
602 acquirefn()
585 return l
603 return l
586
604
587 def lock(self, wait=1):
605 def lock(self, wait=1):
588 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
606 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
589 desc=_('repository %s') % self.origroot)
607 desc=_('repository %s') % self.origroot)
590
608
591 def wlock(self, wait=1):
609 def wlock(self, wait=1):
592 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
610 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
593 self.wreload,
611 self.wreload,
594 desc=_('working directory of %s') % self.origroot)
612 desc=_('working directory of %s') % self.origroot)
595
613
596 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
614 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
597 """
615 """
598 commit an individual file as part of a larger transaction
616 commit an individual file as part of a larger transaction
599 """
617 """
600
618
601 t = self.wread(fn)
619 t = self.wread(fn)
602 fl = self.file(fn)
620 fl = self.file(fn)
603 fp1 = manifest1.get(fn, nullid)
621 fp1 = manifest1.get(fn, nullid)
604 fp2 = manifest2.get(fn, nullid)
622 fp2 = manifest2.get(fn, nullid)
605
623
606 meta = {}
624 meta = {}
607 cp = self.dirstate.copied(fn)
625 cp = self.dirstate.copied(fn)
608 if cp:
626 if cp:
609 # Mark the new revision of this file as a copy of another
627 # Mark the new revision of this file as a copy of another
610 # file. This copy data will effectively act as a parent
628 # file. This copy data will effectively act as a parent
611 # of this new revision. If this is a merge, the first
629 # of this new revision. If this is a merge, the first
612 # parent will be the nullid (meaning "look up the copy data")
630 # parent will be the nullid (meaning "look up the copy data")
613 # and the second one will be the other parent. For example:
631 # and the second one will be the other parent. For example:
614 #
632 #
615 # 0 --- 1 --- 3 rev1 changes file foo
633 # 0 --- 1 --- 3 rev1 changes file foo
616 # \ / rev2 renames foo to bar and changes it
634 # \ / rev2 renames foo to bar and changes it
617 # \- 2 -/ rev3 should have bar with all changes and
635 # \- 2 -/ rev3 should have bar with all changes and
618 # should record that bar descends from
636 # should record that bar descends from
619 # bar in rev2 and foo in rev1
637 # bar in rev2 and foo in rev1
620 #
638 #
621 # this allows this merge to succeed:
639 # this allows this merge to succeed:
622 #
640 #
623 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
641 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
624 # \ / merging rev3 and rev4 should use bar@rev2
642 # \ / merging rev3 and rev4 should use bar@rev2
625 # \- 2 --- 4 as the merge base
643 # \- 2 --- 4 as the merge base
626 #
644 #
627 meta["copy"] = cp
645 meta["copy"] = cp
628 if not manifest2: # not a branch merge
646 if not manifest2: # not a branch merge
629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
647 meta["copyrev"] = hex(manifest1.get(cp, nullid))
630 fp2 = nullid
648 fp2 = nullid
631 elif fp2 != nullid: # copied on remote side
649 elif fp2 != nullid: # copied on remote side
632 meta["copyrev"] = hex(manifest1.get(cp, nullid))
650 meta["copyrev"] = hex(manifest1.get(cp, nullid))
633 elif fp1 != nullid: # copied on local side, reversed
651 elif fp1 != nullid: # copied on local side, reversed
634 meta["copyrev"] = hex(manifest2.get(cp))
652 meta["copyrev"] = hex(manifest2.get(cp))
635 fp2 = fp1
653 fp2 = fp1
636 else: # directory rename
654 else: # directory rename
637 meta["copyrev"] = hex(manifest1.get(cp, nullid))
655 meta["copyrev"] = hex(manifest1.get(cp, nullid))
638 self.ui.debug(_(" %s: copy %s:%s\n") %
656 self.ui.debug(_(" %s: copy %s:%s\n") %
639 (fn, cp, meta["copyrev"]))
657 (fn, cp, meta["copyrev"]))
640 fp1 = nullid
658 fp1 = nullid
641 elif fp2 != nullid:
659 elif fp2 != nullid:
642 # is one parent an ancestor of the other?
660 # is one parent an ancestor of the other?
643 fpa = fl.ancestor(fp1, fp2)
661 fpa = fl.ancestor(fp1, fp2)
644 if fpa == fp1:
662 if fpa == fp1:
645 fp1, fp2 = fp2, nullid
663 fp1, fp2 = fp2, nullid
646 elif fpa == fp2:
664 elif fpa == fp2:
647 fp2 = nullid
665 fp2 = nullid
648
666
649 # is the file unmodified from the parent? report existing entry
667 # is the file unmodified from the parent? report existing entry
650 if fp2 == nullid and not fl.cmp(fp1, t):
668 if fp2 == nullid and not fl.cmp(fp1, t):
651 return fp1
669 return fp1
652
670
653 changelist.append(fn)
671 changelist.append(fn)
654 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
672 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
655
673
656 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
674 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
657 if p1 is None:
675 if p1 is None:
658 p1, p2 = self.dirstate.parents()
676 p1, p2 = self.dirstate.parents()
659 return self.commit(files=files, text=text, user=user, date=date,
677 return self.commit(files=files, text=text, user=user, date=date,
660 p1=p1, p2=p2, wlock=wlock)
678 p1=p1, p2=p2, wlock=wlock)
661
679
662 def commit(self, files=None, text="", user=None, date=None,
680 def commit(self, files=None, text="", user=None, date=None,
663 match=util.always, force=False, lock=None, wlock=None,
681 match=util.always, force=False, lock=None, wlock=None,
664 force_editor=False, p1=None, p2=None, extra={}):
682 force_editor=False, p1=None, p2=None, extra={}):
665
683
666 commit = []
684 commit = []
667 remove = []
685 remove = []
668 changed = []
686 changed = []
669 use_dirstate = (p1 is None) # not rawcommit
687 use_dirstate = (p1 is None) # not rawcommit
670 extra = extra.copy()
688 extra = extra.copy()
671
689
672 if use_dirstate:
690 if use_dirstate:
673 if files:
691 if files:
674 for f in files:
692 for f in files:
675 s = self.dirstate.state(f)
693 s = self.dirstate.state(f)
676 if s in 'nmai':
694 if s in 'nmai':
677 commit.append(f)
695 commit.append(f)
678 elif s == 'r':
696 elif s == 'r':
679 remove.append(f)
697 remove.append(f)
680 else:
698 else:
681 self.ui.warn(_("%s not tracked!\n") % f)
699 self.ui.warn(_("%s not tracked!\n") % f)
682 else:
700 else:
683 changes = self.status(match=match)[:5]
701 changes = self.status(match=match)[:5]
684 modified, added, removed, deleted, unknown = changes
702 modified, added, removed, deleted, unknown = changes
685 commit = modified + added
703 commit = modified + added
686 remove = removed
704 remove = removed
687 else:
705 else:
688 commit = files
706 commit = files
689
707
690 if use_dirstate:
708 if use_dirstate:
691 p1, p2 = self.dirstate.parents()
709 p1, p2 = self.dirstate.parents()
692 update_dirstate = True
710 update_dirstate = True
693 else:
711 else:
694 p1, p2 = p1, p2 or nullid
712 p1, p2 = p1, p2 or nullid
695 update_dirstate = (self.dirstate.parents()[0] == p1)
713 update_dirstate = (self.dirstate.parents()[0] == p1)
696
714
697 c1 = self.changelog.read(p1)
715 c1 = self.changelog.read(p1)
698 c2 = self.changelog.read(p2)
716 c2 = self.changelog.read(p2)
699 m1 = self.manifest.read(c1[0]).copy()
717 m1 = self.manifest.read(c1[0]).copy()
700 m2 = self.manifest.read(c2[0])
718 m2 = self.manifest.read(c2[0])
701
719
702 if use_dirstate:
720 if use_dirstate:
703 branchname = self.workingctx().branch()
721 branchname = self.workingctx().branch()
704 try:
722 try:
705 branchname = branchname.decode('UTF-8').encode('UTF-8')
723 branchname = branchname.decode('UTF-8').encode('UTF-8')
706 except UnicodeDecodeError:
724 except UnicodeDecodeError:
707 raise util.Abort(_('branch name not in UTF-8!'))
725 raise util.Abort(_('branch name not in UTF-8!'))
708 else:
726 else:
709 branchname = ""
727 branchname = ""
710
728
711 if use_dirstate:
729 if use_dirstate:
712 oldname = c1[5].get("branch", "") # stored in UTF-8
730 oldname = c1[5].get("branch", "") # stored in UTF-8
713 if not commit and not remove and not force and p2 == nullid and \
731 if not commit and not remove and not force and p2 == nullid and \
714 branchname == oldname:
732 branchname == oldname:
715 self.ui.status(_("nothing changed\n"))
733 self.ui.status(_("nothing changed\n"))
716 return None
734 return None
717
735
718 xp1 = hex(p1)
736 xp1 = hex(p1)
719 if p2 == nullid: xp2 = ''
737 if p2 == nullid: xp2 = ''
720 else: xp2 = hex(p2)
738 else: xp2 = hex(p2)
721
739
722 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
740 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
723
741
724 if not wlock:
742 if not wlock:
725 wlock = self.wlock()
743 wlock = self.wlock()
726 if not lock:
744 if not lock:
727 lock = self.lock()
745 lock = self.lock()
728 tr = self.transaction()
746 tr = self.transaction()
729
747
730 # check in files
748 # check in files
731 new = {}
749 new = {}
732 linkrev = self.changelog.count()
750 linkrev = self.changelog.count()
733 commit.sort()
751 commit.sort()
734 for f in commit:
752 for f in commit:
735 self.ui.note(f + "\n")
753 self.ui.note(f + "\n")
736 try:
754 try:
737 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
755 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
738 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
756 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
739 except IOError:
757 except IOError:
740 if use_dirstate:
758 if use_dirstate:
741 self.ui.warn(_("trouble committing %s!\n") % f)
759 self.ui.warn(_("trouble committing %s!\n") % f)
742 raise
760 raise
743 else:
761 else:
744 remove.append(f)
762 remove.append(f)
745
763
746 # update manifest
764 # update manifest
747 m1.update(new)
765 m1.update(new)
748 remove.sort()
766 remove.sort()
749
767
750 for f in remove:
768 for f in remove:
751 if f in m1:
769 if f in m1:
752 del m1[f]
770 del m1[f]
753 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
771 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
754
772
755 # add changeset
773 # add changeset
756 new = new.keys()
774 new = new.keys()
757 new.sort()
775 new.sort()
758
776
759 user = user or self.ui.username()
777 user = user or self.ui.username()
760 if not text or force_editor:
778 if not text or force_editor:
761 edittext = []
779 edittext = []
762 if text:
780 if text:
763 edittext.append(text)
781 edittext.append(text)
764 edittext.append("")
782 edittext.append("")
765 edittext.append("HG: user: %s" % user)
783 edittext.append("HG: user: %s" % user)
766 if p2 != nullid:
784 if p2 != nullid:
767 edittext.append("HG: branch merge")
785 edittext.append("HG: branch merge")
768 edittext.extend(["HG: changed %s" % f for f in changed])
786 edittext.extend(["HG: changed %s" % f for f in changed])
769 edittext.extend(["HG: removed %s" % f for f in remove])
787 edittext.extend(["HG: removed %s" % f for f in remove])
770 if not changed and not remove:
788 if not changed and not remove:
771 edittext.append("HG: no files changed")
789 edittext.append("HG: no files changed")
772 edittext.append("")
790 edittext.append("")
773 # run editor in the repository root
791 # run editor in the repository root
774 olddir = os.getcwd()
792 olddir = os.getcwd()
775 os.chdir(self.root)
793 os.chdir(self.root)
776 text = self.ui.edit("\n".join(edittext), user)
794 text = self.ui.edit("\n".join(edittext), user)
777 os.chdir(olddir)
795 os.chdir(olddir)
778
796
779 lines = [line.rstrip() for line in text.rstrip().splitlines()]
797 lines = [line.rstrip() for line in text.rstrip().splitlines()]
780 while lines and not lines[0]:
798 while lines and not lines[0]:
781 del lines[0]
799 del lines[0]
782 if not lines:
800 if not lines:
783 return None
801 return None
784 text = '\n'.join(lines)
802 text = '\n'.join(lines)
785 if branchname:
803 if branchname:
786 extra["branch"] = branchname
804 extra["branch"] = branchname
787 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
805 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
788 user, date, extra)
806 user, date, extra)
789 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
807 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
790 parent2=xp2)
808 parent2=xp2)
791 tr.close()
809 tr.close()
792
810
793 if use_dirstate or update_dirstate:
811 if use_dirstate or update_dirstate:
794 self.dirstate.setparents(n)
812 self.dirstate.setparents(n)
795 if use_dirstate:
813 if use_dirstate:
796 self.dirstate.update(new, "n")
814 self.dirstate.update(new, "n")
797 self.dirstate.forget(remove)
815 self.dirstate.forget(remove)
798
816
799 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
817 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
800 return n
818 return n
801
819
802 def walk(self, node=None, files=[], match=util.always, badmatch=None):
820 def walk(self, node=None, files=[], match=util.always, badmatch=None):
803 '''
821 '''
804 walk recursively through the directory tree or a given
822 walk recursively through the directory tree or a given
805 changeset, finding all files matched by the match
823 changeset, finding all files matched by the match
806 function
824 function
807
825
808 results are yielded in a tuple (src, filename), where src
826 results are yielded in a tuple (src, filename), where src
809 is one of:
827 is one of:
810 'f' the file was found in the directory tree
828 'f' the file was found in the directory tree
811 'm' the file was only in the dirstate and not in the tree
829 'm' the file was only in the dirstate and not in the tree
812 'b' file was not found and matched badmatch
830 'b' file was not found and matched badmatch
813 '''
831 '''
814
832
815 if node:
833 if node:
816 fdict = dict.fromkeys(files)
834 fdict = dict.fromkeys(files)
817 for fn in self.manifest.read(self.changelog.read(node)[0]):
835 for fn in self.manifest.read(self.changelog.read(node)[0]):
818 for ffn in fdict:
836 for ffn in fdict:
819 # match if the file is the exact name or a directory
837 # match if the file is the exact name or a directory
820 if ffn == fn or fn.startswith("%s/" % ffn):
838 if ffn == fn or fn.startswith("%s/" % ffn):
821 del fdict[ffn]
839 del fdict[ffn]
822 break
840 break
823 if match(fn):
841 if match(fn):
824 yield 'm', fn
842 yield 'm', fn
825 for fn in fdict:
843 for fn in fdict:
826 if badmatch and badmatch(fn):
844 if badmatch and badmatch(fn):
827 if match(fn):
845 if match(fn):
828 yield 'b', fn
846 yield 'b', fn
829 else:
847 else:
830 self.ui.warn(_('%s: No such file in rev %s\n') % (
848 self.ui.warn(_('%s: No such file in rev %s\n') % (
831 util.pathto(self.getcwd(), fn), short(node)))
849 util.pathto(self.getcwd(), fn), short(node)))
832 else:
850 else:
833 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
851 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
834 yield src, fn
852 yield src, fn
835
853
836 def status(self, node1=None, node2=None, files=[], match=util.always,
854 def status(self, node1=None, node2=None, files=[], match=util.always,
837 wlock=None, list_ignored=False, list_clean=False):
855 wlock=None, list_ignored=False, list_clean=False):
838 """return status of files between two nodes or node and working directory
856 """return status of files between two nodes or node and working directory
839
857
840 If node1 is None, use the first dirstate parent instead.
858 If node1 is None, use the first dirstate parent instead.
841 If node2 is None, compare node1 with working directory.
859 If node2 is None, compare node1 with working directory.
842 """
860 """
843
861
844 def fcmp(fn, mf):
862 def fcmp(fn, mf):
845 t1 = self.wread(fn)
863 t1 = self.wread(fn)
846 return self.file(fn).cmp(mf.get(fn, nullid), t1)
864 return self.file(fn).cmp(mf.get(fn, nullid), t1)
847
865
848 def mfmatches(node):
866 def mfmatches(node):
849 change = self.changelog.read(node)
867 change = self.changelog.read(node)
850 mf = self.manifest.read(change[0]).copy()
868 mf = self.manifest.read(change[0]).copy()
851 for fn in mf.keys():
869 for fn in mf.keys():
852 if not match(fn):
870 if not match(fn):
853 del mf[fn]
871 del mf[fn]
854 return mf
872 return mf
855
873
856 modified, added, removed, deleted, unknown = [], [], [], [], []
874 modified, added, removed, deleted, unknown = [], [], [], [], []
857 ignored, clean = [], []
875 ignored, clean = [], []
858
876
859 compareworking = False
877 compareworking = False
860 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
878 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
861 compareworking = True
879 compareworking = True
862
880
863 if not compareworking:
881 if not compareworking:
864 # read the manifest from node1 before the manifest from node2,
882 # read the manifest from node1 before the manifest from node2,
865 # so that we'll hit the manifest cache if we're going through
883 # so that we'll hit the manifest cache if we're going through
866 # all the revisions in parent->child order.
884 # all the revisions in parent->child order.
867 mf1 = mfmatches(node1)
885 mf1 = mfmatches(node1)
868
886
869 # are we comparing the working directory?
887 # are we comparing the working directory?
870 if not node2:
888 if not node2:
871 if not wlock:
889 if not wlock:
872 try:
890 try:
873 wlock = self.wlock(wait=0)
891 wlock = self.wlock(wait=0)
874 except lock.LockException:
892 except lock.LockException:
875 wlock = None
893 wlock = None
876 (lookup, modified, added, removed, deleted, unknown,
894 (lookup, modified, added, removed, deleted, unknown,
877 ignored, clean) = self.dirstate.status(files, match,
895 ignored, clean) = self.dirstate.status(files, match,
878 list_ignored, list_clean)
896 list_ignored, list_clean)
879
897
880 # are we comparing working dir against its parent?
898 # are we comparing working dir against its parent?
881 if compareworking:
899 if compareworking:
882 if lookup:
900 if lookup:
883 # do a full compare of any files that might have changed
901 # do a full compare of any files that might have changed
884 mf2 = mfmatches(self.dirstate.parents()[0])
902 mf2 = mfmatches(self.dirstate.parents()[0])
885 for f in lookup:
903 for f in lookup:
886 if fcmp(f, mf2):
904 if fcmp(f, mf2):
887 modified.append(f)
905 modified.append(f)
888 else:
906 else:
889 clean.append(f)
907 clean.append(f)
890 if wlock is not None:
908 if wlock is not None:
891 self.dirstate.update([f], "n")
909 self.dirstate.update([f], "n")
892 else:
910 else:
893 # we are comparing working dir against non-parent
911 # we are comparing working dir against non-parent
894 # generate a pseudo-manifest for the working dir
912 # generate a pseudo-manifest for the working dir
895 # XXX: create it in dirstate.py ?
913 # XXX: create it in dirstate.py ?
896 mf2 = mfmatches(self.dirstate.parents()[0])
914 mf2 = mfmatches(self.dirstate.parents()[0])
897 for f in lookup + modified + added:
915 for f in lookup + modified + added:
898 mf2[f] = ""
916 mf2[f] = ""
899 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
917 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
900 for f in removed:
918 for f in removed:
901 if f in mf2:
919 if f in mf2:
902 del mf2[f]
920 del mf2[f]
903 else:
921 else:
904 # we are comparing two revisions
922 # we are comparing two revisions
905 mf2 = mfmatches(node2)
923 mf2 = mfmatches(node2)
906
924
907 if not compareworking:
925 if not compareworking:
908 # flush lists from dirstate before comparing manifests
926 # flush lists from dirstate before comparing manifests
909 modified, added, clean = [], [], []
927 modified, added, clean = [], [], []
910
928
911 # make sure to sort the files so we talk to the disk in a
929 # make sure to sort the files so we talk to the disk in a
912 # reasonable order
930 # reasonable order
913 mf2keys = mf2.keys()
931 mf2keys = mf2.keys()
914 mf2keys.sort()
932 mf2keys.sort()
915 for fn in mf2keys:
933 for fn in mf2keys:
916 if mf1.has_key(fn):
934 if mf1.has_key(fn):
917 if mf1.flags(fn) != mf2.flags(fn) or \
935 if mf1.flags(fn) != mf2.flags(fn) or \
918 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
936 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
919 modified.append(fn)
937 modified.append(fn)
920 elif list_clean:
938 elif list_clean:
921 clean.append(fn)
939 clean.append(fn)
922 del mf1[fn]
940 del mf1[fn]
923 else:
941 else:
924 added.append(fn)
942 added.append(fn)
925
943
926 removed = mf1.keys()
944 removed = mf1.keys()
927
945
928 # sort and return results:
946 # sort and return results:
929 for l in modified, added, removed, deleted, unknown, ignored, clean:
947 for l in modified, added, removed, deleted, unknown, ignored, clean:
930 l.sort()
948 l.sort()
931 return (modified, added, removed, deleted, unknown, ignored, clean)
949 return (modified, added, removed, deleted, unknown, ignored, clean)
932
950
933 def add(self, list, wlock=None):
951 def add(self, list, wlock=None):
934 if not wlock:
952 if not wlock:
935 wlock = self.wlock()
953 wlock = self.wlock()
936 for f in list:
954 for f in list:
937 p = self.wjoin(f)
955 p = self.wjoin(f)
938 if not os.path.exists(p):
956 if not os.path.exists(p):
939 self.ui.warn(_("%s does not exist!\n") % f)
957 self.ui.warn(_("%s does not exist!\n") % f)
940 elif not os.path.isfile(p):
958 elif not os.path.isfile(p):
941 self.ui.warn(_("%s not added: only files supported currently\n")
959 self.ui.warn(_("%s not added: only files supported currently\n")
942 % f)
960 % f)
943 elif self.dirstate.state(f) in 'an':
961 elif self.dirstate.state(f) in 'an':
944 self.ui.warn(_("%s already tracked!\n") % f)
962 self.ui.warn(_("%s already tracked!\n") % f)
945 else:
963 else:
946 self.dirstate.update([f], "a")
964 self.dirstate.update([f], "a")
947
965
948 def forget(self, list, wlock=None):
966 def forget(self, list, wlock=None):
949 if not wlock:
967 if not wlock:
950 wlock = self.wlock()
968 wlock = self.wlock()
951 for f in list:
969 for f in list:
952 if self.dirstate.state(f) not in 'ai':
970 if self.dirstate.state(f) not in 'ai':
953 self.ui.warn(_("%s not added!\n") % f)
971 self.ui.warn(_("%s not added!\n") % f)
954 else:
972 else:
955 self.dirstate.forget([f])
973 self.dirstate.forget([f])
956
974
957 def remove(self, list, unlink=False, wlock=None):
975 def remove(self, list, unlink=False, wlock=None):
958 if unlink:
976 if unlink:
959 for f in list:
977 for f in list:
960 try:
978 try:
961 util.unlink(self.wjoin(f))
979 util.unlink(self.wjoin(f))
962 except OSError, inst:
980 except OSError, inst:
963 if inst.errno != errno.ENOENT:
981 if inst.errno != errno.ENOENT:
964 raise
982 raise
965 if not wlock:
983 if not wlock:
966 wlock = self.wlock()
984 wlock = self.wlock()
967 for f in list:
985 for f in list:
968 p = self.wjoin(f)
986 p = self.wjoin(f)
969 if os.path.exists(p):
987 if os.path.exists(p):
970 self.ui.warn(_("%s still exists!\n") % f)
988 self.ui.warn(_("%s still exists!\n") % f)
971 elif self.dirstate.state(f) == 'a':
989 elif self.dirstate.state(f) == 'a':
972 self.dirstate.forget([f])
990 self.dirstate.forget([f])
973 elif f not in self.dirstate:
991 elif f not in self.dirstate:
974 self.ui.warn(_("%s not tracked!\n") % f)
992 self.ui.warn(_("%s not tracked!\n") % f)
975 else:
993 else:
976 self.dirstate.update([f], "r")
994 self.dirstate.update([f], "r")
977
995
978 def undelete(self, list, wlock=None):
996 def undelete(self, list, wlock=None):
979 p = self.dirstate.parents()[0]
997 p = self.dirstate.parents()[0]
980 mn = self.changelog.read(p)[0]
998 mn = self.changelog.read(p)[0]
981 m = self.manifest.read(mn)
999 m = self.manifest.read(mn)
982 if not wlock:
1000 if not wlock:
983 wlock = self.wlock()
1001 wlock = self.wlock()
984 for f in list:
1002 for f in list:
985 if self.dirstate.state(f) not in "r":
1003 if self.dirstate.state(f) not in "r":
986 self.ui.warn("%s not removed!\n" % f)
1004 self.ui.warn("%s not removed!\n" % f)
987 else:
1005 else:
988 t = self.file(f).read(m[f])
1006 t = self.file(f).read(m[f])
989 self.wwrite(f, t)
1007 self.wwrite(f, t)
990 util.set_exec(self.wjoin(f), m.execf(f))
1008 util.set_exec(self.wjoin(f), m.execf(f))
991 self.dirstate.update([f], "n")
1009 self.dirstate.update([f], "n")
992
1010
993 def copy(self, source, dest, wlock=None):
1011 def copy(self, source, dest, wlock=None):
994 p = self.wjoin(dest)
1012 p = self.wjoin(dest)
995 if not os.path.exists(p):
1013 if not os.path.exists(p):
996 self.ui.warn(_("%s does not exist!\n") % dest)
1014 self.ui.warn(_("%s does not exist!\n") % dest)
997 elif not os.path.isfile(p):
1015 elif not os.path.isfile(p):
998 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1016 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
999 else:
1017 else:
1000 if not wlock:
1018 if not wlock:
1001 wlock = self.wlock()
1019 wlock = self.wlock()
1002 if self.dirstate.state(dest) == '?':
1020 if self.dirstate.state(dest) == '?':
1003 self.dirstate.update([dest], "a")
1021 self.dirstate.update([dest], "a")
1004 self.dirstate.copy(source, dest)
1022 self.dirstate.copy(source, dest)
1005
1023
1006 def heads(self, start=None):
1024 def heads(self, start=None):
1007 heads = self.changelog.heads(start)
1025 heads = self.changelog.heads(start)
1008 # sort the output in rev descending order
1026 # sort the output in rev descending order
1009 heads = [(-self.changelog.rev(h), h) for h in heads]
1027 heads = [(-self.changelog.rev(h), h) for h in heads]
1010 heads.sort()
1028 heads.sort()
1011 return [n for (r, n) in heads]
1029 return [n for (r, n) in heads]
1012
1030
1013 # branchlookup returns a dict giving a list of branches for
1031 # branchlookup returns a dict giving a list of branches for
1014 # each head. A branch is defined as the tag of a node or
1032 # each head. A branch is defined as the tag of a node or
1015 # the branch of the node's parents. If a node has multiple
1033 # the branch of the node's parents. If a node has multiple
1016 # branch tags, tags are eliminated if they are visible from other
1034 # branch tags, tags are eliminated if they are visible from other
1017 # branch tags.
1035 # branch tags.
1018 #
1036 #
1019 # So, for this graph: a->b->c->d->e
1037 # So, for this graph: a->b->c->d->e
1020 # \ /
1038 # \ /
1021 # aa -----/
1039 # aa -----/
1022 # a has tag 2.6.12
1040 # a has tag 2.6.12
1023 # d has tag 2.6.13
1041 # d has tag 2.6.13
1024 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1042 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1025 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1043 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1026 # from the list.
1044 # from the list.
1027 #
1045 #
1028 # It is possible that more than one head will have the same branch tag.
1046 # It is possible that more than one head will have the same branch tag.
1029 # callers need to check the result for multiple heads under the same
1047 # callers need to check the result for multiple heads under the same
1030 # branch tag if that is a problem for them (ie checkout of a specific
1048 # branch tag if that is a problem for them (ie checkout of a specific
1031 # branch).
1049 # branch).
1032 #
1050 #
1033 # passing in a specific branch will limit the depth of the search
1051 # passing in a specific branch will limit the depth of the search
1034 # through the parents. It won't limit the branches returned in the
1052 # through the parents. It won't limit the branches returned in the
1035 # result though.
1053 # result though.
def branchlookup(self, heads=None, branch=None):
    """Map each head node to the branch-tag names visible from it.

    Two passes: first walk the ancestors of every head (following
    second parents of merges as separate walks) recording which
    tagged nodes are reachable; then, per head, drop any tag that is
    already visible from another tag of the same head, so only the
    "nearest" branch tags remain.

    Passing a specific ``branch`` name stops each walk once that tag
    is reached, limiting search depth; it does not filter the result.
    NOTE(review): callers must handle multiple heads sharing a tag.
    """
    if not heads:
        heads = self.heads()
    pending = list(heads)
    cl = self.changelog
    branches = {}      # node -> {tagged node visible from it: 1}
    merges = []        # (second parent, tags found so far) walks to do
    seenmerge = {}

    # pass 1: record tag visibility while walking from each head
    while pending or merges:
        if merges:
            n, found = merges.pop()
            visit = [n]
        else:
            h = pending.pop()
            visit = [h]
            found = [h]
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = cl.parents(n)
            tags = self.nodetags(n)
            if tags:
                for t in tags:
                    if t == 'tip':
                        continue
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    continue
            seen[n] = 1
            # queue the second parent of an unseen merge as its own walk
            if pp[1] != nullid and n not in seenmerge:
                merges.append((pp[1], list(found)))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])

    # pass 2: per head, eliminate tags shadowed by a nearer tag
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # transitive closure of tagged nodes reachable from `node`
            if node in viscache:
                return viscache[node]
            reach = {}
            stack = [node]
            while stack:
                x = stack.pop()
                if x in viscache:
                    reach.update(viscache[x])
                elif x not in reach:
                    reach[x] = 1
                    if x in branches:
                        stack.extend(branches[x].keys())
            viscache[node] = reach
            return reach
        if h not in branches:
            continue
        # O(n^2), but only over the tags visible from this one head,
        # not every tag in the repository.
        for b in branches[h]:
            shadowed = any(b != bb and b in visible(bb)
                           for bb in branches[h].keys())
            if not shadowed:
                out.setdefault(h, []).extend(self.nodetags(b))
    return out
1118
1136
def branches(self, nodes):
    """For each node, follow first parents down to a branch point.

    A walk stops at a merge (second parent set) or at a root (first
    parent is nullid).  Returns a list of 4-tuples
    ``(start, stop, p1, p2)`` — the wire-protocol "branch" segments
    used during discovery.  An empty ``nodes`` defaults to the tip.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    cl = self.changelog
    segments = []
    for start in nodes:
        cur = start
        while True:
            p1, p2 = cl.parents(cur)
            # merge or root terminates this linear segment
            if p2 != nullid or p1 == nullid:
                segments.append((start, cur, p1, p2))
                break
            cur = p1
    return segments
1132
1150
def between(self, pairs):
    """Sample the first-parent chain between each (top, bottom) pair.

    For every pair, walk from ``top`` toward ``bottom`` via first
    parents and collect the nodes lying 1, 2, 4, 8, ... steps below
    ``top`` (exclusive of both endpoints).  The exponential spacing
    lets discovery binary-search a branch range in O(log n) rounds.
    Returns one list of sampled nodes per input pair.
    """
    result = []
    for top, bottom in pairs:
        node = top
        picked = []
        steps = 0
        nextpick = 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            if steps == nextpick:
                picked.append(node)
                nextpick *= 2
            node = parent
            steps += 1
        result.append(picked)
    return result
1151
1169
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # fix: identity comparison for the None sentinel (was `base == None`)
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid:  # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m:  # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n)  # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1  # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1  # latest known

                # queue unknown parents for the next batched request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # batch branch requests ten at a time
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    # adjacent samples: boundary found
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    # narrow to the (known, unknown) sub-range
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1292
1310
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # fix: identity comparison for the None sentinel (was `base == None`)
    if base is None:
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1340
1358
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull missing changesets from *remote* and add them locally.

    Takes the repository lock unless the caller already passed one in;
    a caller-supplied lock is never released here.  Returns the result
    of addchangegroup, or 0 when there is nothing to pull.
    """
    ownlock = not lock
    if ownlock:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # partial pull needs server-side changegroupsubset support
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        # only release a lock we acquired ourselves
        if ownlock:
            lock.release()
1366
1384
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to *remote*.

    Two transports exist:

    - unbundle: the remote applies the bundle under its own lock
      (new ssh servers, http servers);
    - addchangegroup: assumes we can lock the remote repository
      ourselves (local filesystem, old ssh servers).
    """
    sender = (self.push_unbundle if remote.capable('unbundle')
              else self.push_addchangegroup)
    return sender(remote, force, revs)
1379
1397
def prepush(self, remote, force, revs):
    """Decide what to push and build the changegroup.

    Returns ``(changegroup, remote_heads)`` when there is something to
    send, or ``(None, status)`` when there is nothing to push or the
    push would create new remote heads without force.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        warn = 0

        if remote_heads == [nullid]:
            # pushing into an empty repository can never add heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for rh in remote_heads:
                if rh in self.changelog.nodemap:
                    # remote head survives unless one of our outgoing
                    # heads descends from it
                    descendants = self.changelog.heads(rh, heads)
                    kept = [h for h in heads if h in descendants]
                    if not kept:
                        newheads.append(rh)
                else:
                    newheads.append(rh)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        # forced push despite unpulled remote changes
        self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1435
1453
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote and applying the changegroup there.

    Returns the remote addchangegroup result, or the status code from
    prepush when there is nothing to send.
    """
    # NOTE(review): the lock is held for the whole call and released
    # only when the lock object is finalized — confirm intended.
    remote_lock = remote.lock()

    outcome = self.prepush(remote, force, revs)
    cg = outcome[0]
    if cg is None:
        return outcome[1]
    return remote.addchangegroup(cg, 'push', self.url())
1444
1462
def push_unbundle(self, remote, force, revs):
    """Push without locking the remote: send a bundle for it to apply.

    The local repo finds the server's heads and the revs to push; when
    the revs arrive, the server aborts if its heads changed meanwhile
    (someone else won the commit/push race) — unless we pass the
    'force' marker instead of the expected head list.
    """
    outcome = self.prepush(remote, force, revs)
    cg = outcome[0]
    if cg is None:
        return outcome[1]
    expected_heads = ['force'] if force else outcome[1]
    return remote.unbundle(cg, expected_heads, 'push')
1457
1475
def changegroupinfo(self, nodes):
    """Report how many changesets the outgoing group contains, listing
    each node (hex) when the ui is in debug mode."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug(_("List of changesets:\n"))
        for n in nodes:
            self.ui.debug("%s\n" % hex(n))
1464
1482
1465 def changegroupsubset(self, bases, heads, source):
1483 def changegroupsubset(self, bases, heads, source):
1466 """This function generates a changegroup consisting of all the nodes
1484 """This function generates a changegroup consisting of all the nodes
1467 that are descendents of any of the bases, and ancestors of any of
1485 that are descendents of any of the bases, and ancestors of any of
1468 the heads.
1486 the heads.
1469
1487
1470 It is fairly complex as determining which filenodes and which
1488 It is fairly complex as determining which filenodes and which
1471 manifest nodes need to be included for the changeset to be complete
1489 manifest nodes need to be included for the changeset to be complete
1472 is non-trivial.
1490 is non-trivial.
1473
1491
1474 Another wrinkle is doing the reverse, figuring out which changeset in
1492 Another wrinkle is doing the reverse, figuring out which changeset in
1475 the changegroup a particular filenode or manifestnode belongs to."""
1493 the changegroup a particular filenode or manifestnode belongs to."""
1476
1494
1477 self.hook('preoutgoing', throw=True, source=source)
1495 self.hook('preoutgoing', throw=True, source=source)
1478
1496
1479 # Set up some initial variables
1497 # Set up some initial variables
1480 # Make it easy to refer to self.changelog
1498 # Make it easy to refer to self.changelog
1481 cl = self.changelog
1499 cl = self.changelog
1482 # msng is short for missing - compute the list of changesets in this
1500 # msng is short for missing - compute the list of changesets in this
1483 # changegroup.
1501 # changegroup.
1484 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1502 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1485 self.changegroupinfo(msng_cl_lst)
1503 self.changegroupinfo(msng_cl_lst)
1486 # Some bases may turn out to be superfluous, and some heads may be
1504 # Some bases may turn out to be superfluous, and some heads may be
1487 # too. nodesbetween will return the minimal set of bases and heads
1505 # too. nodesbetween will return the minimal set of bases and heads
1488 # necessary to re-create the changegroup.
1506 # necessary to re-create the changegroup.
1489
1507
1490 # Known heads are the list of heads that it is assumed the recipient
1508 # Known heads are the list of heads that it is assumed the recipient
1491 # of this changegroup will know about.
1509 # of this changegroup will know about.
1492 knownheads = {}
1510 knownheads = {}
1493 # We assume that all parents of bases are known heads.
1511 # We assume that all parents of bases are known heads.
1494 for n in bases:
1512 for n in bases:
1495 for p in cl.parents(n):
1513 for p in cl.parents(n):
1496 if p != nullid:
1514 if p != nullid:
1497 knownheads[p] = 1
1515 knownheads[p] = 1
1498 knownheads = knownheads.keys()
1516 knownheads = knownheads.keys()
1499 if knownheads:
1517 if knownheads:
1500 # Now that we know what heads are known, we can compute which
1518 # Now that we know what heads are known, we can compute which
1501 # changesets are known. The recipient must know about all
1519 # changesets are known. The recipient must know about all
1502 # changesets required to reach the known heads from the null
1520 # changesets required to reach the known heads from the null
1503 # changeset.
1521 # changeset.
1504 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1522 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1505 junk = None
1523 junk = None
1506 # Transform the list into an ersatz set.
1524 # Transform the list into an ersatz set.
1507 has_cl_set = dict.fromkeys(has_cl_set)
1525 has_cl_set = dict.fromkeys(has_cl_set)
1508 else:
1526 else:
1509 # If there were no known heads, the recipient cannot be assumed to
1527 # If there were no known heads, the recipient cannot be assumed to
1510 # know about any changesets.
1528 # know about any changesets.
1511 has_cl_set = {}
1529 has_cl_set = {}
1512
1530
1513 # Make it easy to refer to self.manifest
1531 # Make it easy to refer to self.manifest
1514 mnfst = self.manifest
1532 mnfst = self.manifest
1515 # We don't know which manifests are missing yet
1533 # We don't know which manifests are missing yet
1516 msng_mnfst_set = {}
1534 msng_mnfst_set = {}
1517 # Nor do we know which filenodes are missing.
1535 # Nor do we know which filenodes are missing.
1518 msng_filenode_set = {}
1536 msng_filenode_set = {}
1519
1537
1520 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1538 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1521 junk = None
1539 junk = None
1522
1540
1523 # A changeset always belongs to itself, so the changenode lookup
1541 # A changeset always belongs to itself, so the changenode lookup
1524 # function for a changenode is identity.
1542 # function for a changenode is identity.
1525 def identity(x):
1543 def identity(x):
1526 return x
1544 return x
1527
1545
1528 # A function generating function. Sets up an environment for the
1546 # A function generating function. Sets up an environment for the
1529 # inner function.
1547 # inner function.
1530 def cmp_by_rev_func(revlog):
1548 def cmp_by_rev_func(revlog):
1531 # Compare two nodes by their revision number in the environment's
1549 # Compare two nodes by their revision number in the environment's
1532 # revision history. Since the revision number both represents the
1550 # revision history. Since the revision number both represents the
1533 # most efficient order to read the nodes in, and represents a
1551 # most efficient order to read the nodes in, and represents a
1534 # topological sorting of the nodes, this function is often useful.
1552 # topological sorting of the nodes, this function is often useful.
1535 def cmp_by_rev(a, b):
1553 def cmp_by_rev(a, b):
1536 return cmp(revlog.rev(a), revlog.rev(b))
1554 return cmp(revlog.rev(a), revlog.rev(b))
1537 return cmp_by_rev
1555 return cmp_by_rev
1538
1556
1539 # If we determine that a particular file or manifest node must be a
1557 # If we determine that a particular file or manifest node must be a
1540 # node that the recipient of the changegroup will already have, we can
1558 # node that the recipient of the changegroup will already have, we can
1541 # also assume the recipient will have all the parents. This function
1559 # also assume the recipient will have all the parents. This function
1542 # prunes them from the set of missing nodes.
1560 # prunes them from the set of missing nodes.
1543 def prune_parents(revlog, hasset, msngset):
1561 def prune_parents(revlog, hasset, msngset):
1544 haslst = hasset.keys()
1562 haslst = hasset.keys()
1545 haslst.sort(cmp_by_rev_func(revlog))
1563 haslst.sort(cmp_by_rev_func(revlog))
1546 for node in haslst:
1564 for node in haslst:
1547 parentlst = [p for p in revlog.parents(node) if p != nullid]
1565 parentlst = [p for p in revlog.parents(node) if p != nullid]
1548 while parentlst:
1566 while parentlst:
1549 n = parentlst.pop()
1567 n = parentlst.pop()
1550 if n not in hasset:
1568 if n not in hasset:
1551 hasset[n] = 1
1569 hasset[n] = 1
1552 p = [p for p in revlog.parents(n) if p != nullid]
1570 p = [p for p in revlog.parents(n) if p != nullid]
1553 parentlst.extend(p)
1571 parentlst.extend(p)
1554 for n in hasset:
1572 for n in hasset:
1555 msngset.pop(n, None)
1573 msngset.pop(n, None)
1556
1574
1557 # This is a function generating function used to set up an environment
1575 # This is a function generating function used to set up an environment
1558 # for the inner function to execute in.
1576 # for the inner function to execute in.
1559 def manifest_and_file_collector(changedfileset):
1577 def manifest_and_file_collector(changedfileset):
1560 # This is an information gathering function that gathers
1578 # This is an information gathering function that gathers
1561 # information from each changeset node that goes out as part of
1579 # information from each changeset node that goes out as part of
1562 # the changegroup. The information gathered is a list of which
1580 # the changegroup. The information gathered is a list of which
1563 # manifest nodes are potentially required (the recipient may
1581 # manifest nodes are potentially required (the recipient may
1564 # already have them) and total list of all files which were
1582 # already have them) and total list of all files which were
1565 # changed in any changeset in the changegroup.
1583 # changed in any changeset in the changegroup.
1566 #
1584 #
1567 # We also remember the first changenode we saw any manifest
1585 # We also remember the first changenode we saw any manifest
1568 # referenced by so we can later determine which changenode 'owns'
1586 # referenced by so we can later determine which changenode 'owns'
1569 # the manifest.
1587 # the manifest.
1570 def collect_manifests_and_files(clnode):
1588 def collect_manifests_and_files(clnode):
1571 c = cl.read(clnode)
1589 c = cl.read(clnode)
1572 for f in c[3]:
1590 for f in c[3]:
1573 # This is to make sure we only have one instance of each
1591 # This is to make sure we only have one instance of each
1574 # filename string for each filename.
1592 # filename string for each filename.
1575 changedfileset.setdefault(f, f)
1593 changedfileset.setdefault(f, f)
1576 msng_mnfst_set.setdefault(c[0], clnode)
1594 msng_mnfst_set.setdefault(c[0], clnode)
1577 return collect_manifests_and_files
1595 return collect_manifests_and_files
1578
1596
1579 # Figure out which manifest nodes (of the ones we think might be part
1597 # Figure out which manifest nodes (of the ones we think might be part
1580 # of the changegroup) the recipient must know about and remove them
1598 # of the changegroup) the recipient must know about and remove them
1581 # from the changegroup.
1599 # from the changegroup.
1582 def prune_manifests():
1600 def prune_manifests():
1583 has_mnfst_set = {}
1601 has_mnfst_set = {}
1584 for n in msng_mnfst_set:
1602 for n in msng_mnfst_set:
1585 # If a 'missing' manifest thinks it belongs to a changenode
1603 # If a 'missing' manifest thinks it belongs to a changenode
1586 # the recipient is assumed to have, obviously the recipient
1604 # the recipient is assumed to have, obviously the recipient
1587 # must have that manifest.
1605 # must have that manifest.
1588 linknode = cl.node(mnfst.linkrev(n))
1606 linknode = cl.node(mnfst.linkrev(n))
1589 if linknode in has_cl_set:
1607 if linknode in has_cl_set:
1590 has_mnfst_set[n] = 1
1608 has_mnfst_set[n] = 1
1591 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1609 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1592
1610
1593 # Use the information collected in collect_manifests_and_files to say
1611 # Use the information collected in collect_manifests_and_files to say
1594 # which changenode any manifestnode belongs to.
1612 # which changenode any manifestnode belongs to.
1595 def lookup_manifest_link(mnfstnode):
1613 def lookup_manifest_link(mnfstnode):
1596 return msng_mnfst_set[mnfstnode]
1614 return msng_mnfst_set[mnfstnode]
1597
1615
1598 # A function generating function that sets up the initial environment
1616 # A function generating function that sets up the initial environment
1599 # the inner function.
1617 # the inner function.
1600 def filenode_collector(changedfiles):
1618 def filenode_collector(changedfiles):
1601 next_rev = [0]
1619 next_rev = [0]
1602 # This gathers information from each manifestnode included in the
1620 # This gathers information from each manifestnode included in the
1603 # changegroup about which filenodes the manifest node references
1621 # changegroup about which filenodes the manifest node references
1604 # so we can include those in the changegroup too.
1622 # so we can include those in the changegroup too.
1605 #
1623 #
1606 # It also remembers which changenode each filenode belongs to. It
1624 # It also remembers which changenode each filenode belongs to. It
1607 # does this by assuming the a filenode belongs to the changenode
1625 # does this by assuming the a filenode belongs to the changenode
1608 # the first manifest that references it belongs to.
1626 # the first manifest that references it belongs to.
1609 def collect_msng_filenodes(mnfstnode):
1627 def collect_msng_filenodes(mnfstnode):
1610 r = mnfst.rev(mnfstnode)
1628 r = mnfst.rev(mnfstnode)
1611 if r == next_rev[0]:
1629 if r == next_rev[0]:
1612 # If the last rev we looked at was the one just previous,
1630 # If the last rev we looked at was the one just previous,
1613 # we only need to see a diff.
1631 # we only need to see a diff.
1614 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1632 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1615 # For each line in the delta
1633 # For each line in the delta
1616 for dline in delta.splitlines():
1634 for dline in delta.splitlines():
1617 # get the filename and filenode for that line
1635 # get the filename and filenode for that line
1618 f, fnode = dline.split('\0')
1636 f, fnode = dline.split('\0')
1619 fnode = bin(fnode[:40])
1637 fnode = bin(fnode[:40])
1620 f = changedfiles.get(f, None)
1638 f = changedfiles.get(f, None)
1621 # And if the file is in the list of files we care
1639 # And if the file is in the list of files we care
1622 # about.
1640 # about.
1623 if f is not None:
1641 if f is not None:
1624 # Get the changenode this manifest belongs to
1642 # Get the changenode this manifest belongs to
1625 clnode = msng_mnfst_set[mnfstnode]
1643 clnode = msng_mnfst_set[mnfstnode]
1626 # Create the set of filenodes for the file if
1644 # Create the set of filenodes for the file if
1627 # there isn't one already.
1645 # there isn't one already.
1628 ndset = msng_filenode_set.setdefault(f, {})
1646 ndset = msng_filenode_set.setdefault(f, {})
1629 # And set the filenode's changelog node to the
1647 # And set the filenode's changelog node to the
1630 # manifest's if it hasn't been set already.
1648 # manifest's if it hasn't been set already.
1631 ndset.setdefault(fnode, clnode)
1649 ndset.setdefault(fnode, clnode)
1632 else:
1650 else:
1633 # Otherwise we need a full manifest.
1651 # Otherwise we need a full manifest.
1634 m = mnfst.read(mnfstnode)
1652 m = mnfst.read(mnfstnode)
1635 # For every file in we care about.
1653 # For every file in we care about.
1636 for f in changedfiles:
1654 for f in changedfiles:
1637 fnode = m.get(f, None)
1655 fnode = m.get(f, None)
1638 # If it's in the manifest
1656 # If it's in the manifest
1639 if fnode is not None:
1657 if fnode is not None:
1640 # See comments above.
1658 # See comments above.
1641 clnode = msng_mnfst_set[mnfstnode]
1659 clnode = msng_mnfst_set[mnfstnode]
1642 ndset = msng_filenode_set.setdefault(f, {})
1660 ndset = msng_filenode_set.setdefault(f, {})
1643 ndset.setdefault(fnode, clnode)
1661 ndset.setdefault(fnode, clnode)
1644 # Remember the revision we hope to see next.
1662 # Remember the revision we hope to see next.
1645 next_rev[0] = r + 1
1663 next_rev[0] = r + 1
1646 return collect_msng_filenodes
1664 return collect_msng_filenodes
1647
1665
1648 # We have a list of filenodes we think we need for a file, lets remove
1666 # We have a list of filenodes we think we need for a file, lets remove
1649 # all those we now the recipient must have.
1667 # all those we now the recipient must have.
1650 def prune_filenodes(f, filerevlog):
1668 def prune_filenodes(f, filerevlog):
1651 msngset = msng_filenode_set[f]
1669 msngset = msng_filenode_set[f]
1652 hasset = {}
1670 hasset = {}
1653 # If a 'missing' filenode thinks it belongs to a changenode we
1671 # If a 'missing' filenode thinks it belongs to a changenode we
1654 # assume the recipient must have, then the recipient must have
1672 # assume the recipient must have, then the recipient must have
1655 # that filenode.
1673 # that filenode.
1656 for n in msngset:
1674 for n in msngset:
1657 clnode = cl.node(filerevlog.linkrev(n))
1675 clnode = cl.node(filerevlog.linkrev(n))
1658 if clnode in has_cl_set:
1676 if clnode in has_cl_set:
1659 hasset[n] = 1
1677 hasset[n] = 1
1660 prune_parents(filerevlog, hasset, msngset)
1678 prune_parents(filerevlog, hasset, msngset)
1661
1679
1662 # A function generator function that sets up the a context for the
1680 # A function generator function that sets up the a context for the
1663 # inner function.
1681 # inner function.
1664 def lookup_filenode_link_func(fname):
1682 def lookup_filenode_link_func(fname):
1665 msngset = msng_filenode_set[fname]
1683 msngset = msng_filenode_set[fname]
1666 # Lookup the changenode the filenode belongs to.
1684 # Lookup the changenode the filenode belongs to.
1667 def lookup_filenode_link(fnode):
1685 def lookup_filenode_link(fnode):
1668 return msngset[fnode]
1686 return msngset[fnode]
1669 return lookup_filenode_link
1687 return lookup_filenode_link
1670
1688
1671 # Now that we have all theses utility functions to help out and
1689 # Now that we have all theses utility functions to help out and
1672 # logically divide up the task, generate the group.
1690 # logically divide up the task, generate the group.
1673 def gengroup():
1691 def gengroup():
1674 # The set of changed files starts empty.
1692 # The set of changed files starts empty.
1675 changedfiles = {}
1693 changedfiles = {}
1676 # Create a changenode group generator that will call our functions
1694 # Create a changenode group generator that will call our functions
1677 # back to lookup the owning changenode and collect information.
1695 # back to lookup the owning changenode and collect information.
1678 group = cl.group(msng_cl_lst, identity,
1696 group = cl.group(msng_cl_lst, identity,
1679 manifest_and_file_collector(changedfiles))
1697 manifest_and_file_collector(changedfiles))
1680 for chnk in group:
1698 for chnk in group:
1681 yield chnk
1699 yield chnk
1682
1700
1683 # The list of manifests has been collected by the generator
1701 # The list of manifests has been collected by the generator
1684 # calling our functions back.
1702 # calling our functions back.
1685 prune_manifests()
1703 prune_manifests()
1686 msng_mnfst_lst = msng_mnfst_set.keys()
1704 msng_mnfst_lst = msng_mnfst_set.keys()
1687 # Sort the manifestnodes by revision number.
1705 # Sort the manifestnodes by revision number.
1688 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1706 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1689 # Create a generator for the manifestnodes that calls our lookup
1707 # Create a generator for the manifestnodes that calls our lookup
1690 # and data collection functions back.
1708 # and data collection functions back.
1691 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1709 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1692 filenode_collector(changedfiles))
1710 filenode_collector(changedfiles))
1693 for chnk in group:
1711 for chnk in group:
1694 yield chnk
1712 yield chnk
1695
1713
1696 # These are no longer needed, dereference and toss the memory for
1714 # These are no longer needed, dereference and toss the memory for
1697 # them.
1715 # them.
1698 msng_mnfst_lst = None
1716 msng_mnfst_lst = None
1699 msng_mnfst_set.clear()
1717 msng_mnfst_set.clear()
1700
1718
1701 changedfiles = changedfiles.keys()
1719 changedfiles = changedfiles.keys()
1702 changedfiles.sort()
1720 changedfiles.sort()
1703 # Go through all our files in order sorted by name.
1721 # Go through all our files in order sorted by name.
1704 for fname in changedfiles:
1722 for fname in changedfiles:
1705 filerevlog = self.file(fname)
1723 filerevlog = self.file(fname)
1706 # Toss out the filenodes that the recipient isn't really
1724 # Toss out the filenodes that the recipient isn't really
1707 # missing.
1725 # missing.
1708 if msng_filenode_set.has_key(fname):
1726 if msng_filenode_set.has_key(fname):
1709 prune_filenodes(fname, filerevlog)
1727 prune_filenodes(fname, filerevlog)
1710 msng_filenode_lst = msng_filenode_set[fname].keys()
1728 msng_filenode_lst = msng_filenode_set[fname].keys()
1711 else:
1729 else:
1712 msng_filenode_lst = []
1730 msng_filenode_lst = []
1713 # If any filenodes are left, generate the group for them,
1731 # If any filenodes are left, generate the group for them,
1714 # otherwise don't bother.
1732 # otherwise don't bother.
1715 if len(msng_filenode_lst) > 0:
1733 if len(msng_filenode_lst) > 0:
1716 yield changegroup.genchunk(fname)
1734 yield changegroup.genchunk(fname)
1717 # Sort the filenodes by their revision #
1735 # Sort the filenodes by their revision #
1718 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1736 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1719 # Create a group generator and only pass in a changenode
1737 # Create a group generator and only pass in a changenode
1720 # lookup function as we need to collect no information
1738 # lookup function as we need to collect no information
1721 # from filenodes.
1739 # from filenodes.
1722 group = filerevlog.group(msng_filenode_lst,
1740 group = filerevlog.group(msng_filenode_lst,
1723 lookup_filenode_link_func(fname))
1741 lookup_filenode_link_func(fname))
1724 for chnk in group:
1742 for chnk in group:
1725 yield chnk
1743 yield chnk
1726 if msng_filenode_set.has_key(fname):
1744 if msng_filenode_set.has_key(fname):
1727 # Don't need this anymore, toss it to free memory.
1745 # Don't need this anymore, toss it to free memory.
1728 del msng_filenode_set[fname]
1746 del msng_filenode_set[fname]
1729 # Signal that no more groups are left.
1747 # Signal that no more groups are left.
1730 yield changegroup.closechunk()
1748 yield changegroup.closechunk()
1731
1749
1732 if msng_cl_lst:
1750 if msng_cl_lst:
1733 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1751 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1734
1752
1735 return util.chunkbuffer(gengroup())
1753 return util.chunkbuffer(gengroup())
1736
1754
1737 def changegroup(self, basenodes, source):
1755 def changegroup(self, basenodes, source):
1738 """Generate a changegroup of all nodes that we have that a recipient
1756 """Generate a changegroup of all nodes that we have that a recipient
1739 doesn't.
1757 doesn't.
1740
1758
1741 This is much easier than the previous function as we can assume that
1759 This is much easier than the previous function as we can assume that
1742 the recipient has any changenode we aren't sending them."""
1760 the recipient has any changenode we aren't sending them."""
1743
1761
1744 self.hook('preoutgoing', throw=True, source=source)
1762 self.hook('preoutgoing', throw=True, source=source)
1745
1763
1746 cl = self.changelog
1764 cl = self.changelog
1747 nodes = cl.nodesbetween(basenodes, None)[0]
1765 nodes = cl.nodesbetween(basenodes, None)[0]
1748 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1766 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1749 self.changegroupinfo(nodes)
1767 self.changegroupinfo(nodes)
1750
1768
1751 def identity(x):
1769 def identity(x):
1752 return x
1770 return x
1753
1771
1754 def gennodelst(revlog):
1772 def gennodelst(revlog):
1755 for r in xrange(0, revlog.count()):
1773 for r in xrange(0, revlog.count()):
1756 n = revlog.node(r)
1774 n = revlog.node(r)
1757 if revlog.linkrev(n) in revset:
1775 if revlog.linkrev(n) in revset:
1758 yield n
1776 yield n
1759
1777
1760 def changed_file_collector(changedfileset):
1778 def changed_file_collector(changedfileset):
1761 def collect_changed_files(clnode):
1779 def collect_changed_files(clnode):
1762 c = cl.read(clnode)
1780 c = cl.read(clnode)
1763 for fname in c[3]:
1781 for fname in c[3]:
1764 changedfileset[fname] = 1
1782 changedfileset[fname] = 1
1765 return collect_changed_files
1783 return collect_changed_files
1766
1784
1767 def lookuprevlink_func(revlog):
1785 def lookuprevlink_func(revlog):
1768 def lookuprevlink(n):
1786 def lookuprevlink(n):
1769 return cl.node(revlog.linkrev(n))
1787 return cl.node(revlog.linkrev(n))
1770 return lookuprevlink
1788 return lookuprevlink
1771
1789
1772 def gengroup():
1790 def gengroup():
1773 # construct a list of all changed files
1791 # construct a list of all changed files
1774 changedfiles = {}
1792 changedfiles = {}
1775
1793
1776 for chnk in cl.group(nodes, identity,
1794 for chnk in cl.group(nodes, identity,
1777 changed_file_collector(changedfiles)):
1795 changed_file_collector(changedfiles)):
1778 yield chnk
1796 yield chnk
1779 changedfiles = changedfiles.keys()
1797 changedfiles = changedfiles.keys()
1780 changedfiles.sort()
1798 changedfiles.sort()
1781
1799
1782 mnfst = self.manifest
1800 mnfst = self.manifest
1783 nodeiter = gennodelst(mnfst)
1801 nodeiter = gennodelst(mnfst)
1784 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1802 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1785 yield chnk
1803 yield chnk
1786
1804
1787 for fname in changedfiles:
1805 for fname in changedfiles:
1788 filerevlog = self.file(fname)
1806 filerevlog = self.file(fname)
1789 nodeiter = gennodelst(filerevlog)
1807 nodeiter = gennodelst(filerevlog)
1790 nodeiter = list(nodeiter)
1808 nodeiter = list(nodeiter)
1791 if nodeiter:
1809 if nodeiter:
1792 yield changegroup.genchunk(fname)
1810 yield changegroup.genchunk(fname)
1793 lookup = lookuprevlink_func(filerevlog)
1811 lookup = lookuprevlink_func(filerevlog)
1794 for chnk in filerevlog.group(nodeiter, lookup):
1812 for chnk in filerevlog.group(nodeiter, lookup):
1795 yield chnk
1813 yield chnk
1796
1814
1797 yield changegroup.closechunk()
1815 yield changegroup.closechunk()
1798
1816
1799 if nodes:
1817 if nodes:
1800 self.hook('outgoing', node=hex(nodes[0]), source=source)
1818 self.hook('outgoing', node=hex(nodes[0]), source=source)
1801
1819
1802 return util.chunkbuffer(gengroup())
1820 return util.chunkbuffer(gengroup())
1803
1821
1804 def addchangegroup(self, source, srctype, url):
1822 def addchangegroup(self, source, srctype, url):
1805 """add changegroup to repo.
1823 """add changegroup to repo.
1806
1824
1807 return values:
1825 return values:
1808 - nothing changed or no source: 0
1826 - nothing changed or no source: 0
1809 - more heads than before: 1+added heads (2..n)
1827 - more heads than before: 1+added heads (2..n)
1810 - less heads than before: -1-removed heads (-2..-n)
1828 - less heads than before: -1-removed heads (-2..-n)
1811 - number of heads stays the same: 1
1829 - number of heads stays the same: 1
1812 """
1830 """
1813 def csmap(x):
1831 def csmap(x):
1814 self.ui.debug(_("add changeset %s\n") % short(x))
1832 self.ui.debug(_("add changeset %s\n") % short(x))
1815 return cl.count()
1833 return cl.count()
1816
1834
1817 def revmap(x):
1835 def revmap(x):
1818 return cl.rev(x)
1836 return cl.rev(x)
1819
1837
1820 if not source:
1838 if not source:
1821 return 0
1839 return 0
1822
1840
1823 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1841 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1824
1842
1825 changesets = files = revisions = 0
1843 changesets = files = revisions = 0
1826
1844
1827 tr = self.transaction()
1845 tr = self.transaction()
1828
1846
1829 # write changelog data to temp files so concurrent readers will not see
1847 # write changelog data to temp files so concurrent readers will not see
1830 # inconsistent view
1848 # inconsistent view
1831 cl = None
1849 cl = None
1832 try:
1850 try:
1833 cl = appendfile.appendchangelog(self.sopener,
1851 cl = appendfile.appendchangelog(self.sopener,
1834 self.changelog.version)
1852 self.changelog.version)
1835
1853
1836 oldheads = len(cl.heads())
1854 oldheads = len(cl.heads())
1837
1855
1838 # pull off the changeset group
1856 # pull off the changeset group
1839 self.ui.status(_("adding changesets\n"))
1857 self.ui.status(_("adding changesets\n"))
1840 cor = cl.count() - 1
1858 cor = cl.count() - 1
1841 chunkiter = changegroup.chunkiter(source)
1859 chunkiter = changegroup.chunkiter(source)
1842 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1860 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1843 raise util.Abort(_("received changelog group is empty"))
1861 raise util.Abort(_("received changelog group is empty"))
1844 cnr = cl.count() - 1
1862 cnr = cl.count() - 1
1845 changesets = cnr - cor
1863 changesets = cnr - cor
1846
1864
1847 # pull off the manifest group
1865 # pull off the manifest group
1848 self.ui.status(_("adding manifests\n"))
1866 self.ui.status(_("adding manifests\n"))
1849 chunkiter = changegroup.chunkiter(source)
1867 chunkiter = changegroup.chunkiter(source)
1850 # no need to check for empty manifest group here:
1868 # no need to check for empty manifest group here:
1851 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1869 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1852 # no new manifest will be created and the manifest group will
1870 # no new manifest will be created and the manifest group will
1853 # be empty during the pull
1871 # be empty during the pull
1854 self.manifest.addgroup(chunkiter, revmap, tr)
1872 self.manifest.addgroup(chunkiter, revmap, tr)
1855
1873
1856 # process the files
1874 # process the files
1857 self.ui.status(_("adding file changes\n"))
1875 self.ui.status(_("adding file changes\n"))
1858 while 1:
1876 while 1:
1859 f = changegroup.getchunk(source)
1877 f = changegroup.getchunk(source)
1860 if not f:
1878 if not f:
1861 break
1879 break
1862 self.ui.debug(_("adding %s revisions\n") % f)
1880 self.ui.debug(_("adding %s revisions\n") % f)
1863 fl = self.file(f)
1881 fl = self.file(f)
1864 o = fl.count()
1882 o = fl.count()
1865 chunkiter = changegroup.chunkiter(source)
1883 chunkiter = changegroup.chunkiter(source)
1866 if fl.addgroup(chunkiter, revmap, tr) is None:
1884 if fl.addgroup(chunkiter, revmap, tr) is None:
1867 raise util.Abort(_("received file revlog group is empty"))
1885 raise util.Abort(_("received file revlog group is empty"))
1868 revisions += fl.count() - o
1886 revisions += fl.count() - o
1869 files += 1
1887 files += 1
1870
1888
1871 cl.writedata()
1889 cl.writedata()
1872 finally:
1890 finally:
1873 if cl:
1891 if cl:
1874 cl.cleanup()
1892 cl.cleanup()
1875
1893
1876 # make changelog see real files again
1894 # make changelog see real files again
1877 self.changelog = changelog.changelog(self.sopener,
1895 self.changelog = changelog.changelog(self.sopener,
1878 self.changelog.version)
1896 self.changelog.version)
1879 self.changelog.checkinlinesize(tr)
1897 self.changelog.checkinlinesize(tr)
1880
1898
1881 newheads = len(self.changelog.heads())
1899 newheads = len(self.changelog.heads())
1882 heads = ""
1900 heads = ""
1883 if oldheads and newheads != oldheads:
1901 if oldheads and newheads != oldheads:
1884 heads = _(" (%+d heads)") % (newheads - oldheads)
1902 heads = _(" (%+d heads)") % (newheads - oldheads)
1885
1903
1886 self.ui.status(_("added %d changesets"
1904 self.ui.status(_("added %d changesets"
1887 " with %d changes to %d files%s\n")
1905 " with %d changes to %d files%s\n")
1888 % (changesets, revisions, files, heads))
1906 % (changesets, revisions, files, heads))
1889
1907
1890 if changesets > 0:
1908 if changesets > 0:
1891 self.hook('pretxnchangegroup', throw=True,
1909 self.hook('pretxnchangegroup', throw=True,
1892 node=hex(self.changelog.node(cor+1)), source=srctype,
1910 node=hex(self.changelog.node(cor+1)), source=srctype,
1893 url=url)
1911 url=url)
1894
1912
1895 tr.close()
1913 tr.close()
1896
1914
1897 if changesets > 0:
1915 if changesets > 0:
1898 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1916 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1899 source=srctype, url=url)
1917 source=srctype, url=url)
1900
1918
1901 for i in xrange(cor + 1, cnr + 1):
1919 for i in xrange(cor + 1, cnr + 1):
1902 self.hook("incoming", node=hex(self.changelog.node(i)),
1920 self.hook("incoming", node=hex(self.changelog.node(i)),
1903 source=srctype, url=url)
1921 source=srctype, url=url)
1904
1922
1905 # never return 0 here:
1923 # never return 0 here:
1906 if newheads < oldheads:
1924 if newheads < oldheads:
1907 return newheads - oldheads - 1
1925 return newheads - oldheads - 1
1908 else:
1926 else:
1909 return newheads - oldheads + 1
1927 return newheads - oldheads + 1
1910
1928
1911
1929
1912 def stream_in(self, remote):
1930 def stream_in(self, remote):
1913 fp = remote.stream_out()
1931 fp = remote.stream_out()
1914 l = fp.readline()
1932 l = fp.readline()
1915 try:
1933 try:
1916 resp = int(l)
1934 resp = int(l)
1917 except ValueError:
1935 except ValueError:
1918 raise util.UnexpectedOutput(
1936 raise util.UnexpectedOutput(
1919 _('Unexpected response from remote server:'), l)
1937 _('Unexpected response from remote server:'), l)
1920 if resp == 1:
1938 if resp == 1:
1921 raise util.Abort(_('operation forbidden by server'))
1939 raise util.Abort(_('operation forbidden by server'))
1922 elif resp == 2:
1940 elif resp == 2:
1923 raise util.Abort(_('locking the remote repository failed'))
1941 raise util.Abort(_('locking the remote repository failed'))
1924 elif resp != 0:
1942 elif resp != 0:
1925 raise util.Abort(_('the server sent an unknown error code'))
1943 raise util.Abort(_('the server sent an unknown error code'))
1926 self.ui.status(_('streaming all changes\n'))
1944 self.ui.status(_('streaming all changes\n'))
1927 l = fp.readline()
1945 l = fp.readline()
1928 try:
1946 try:
1929 total_files, total_bytes = map(int, l.split(' ', 1))
1947 total_files, total_bytes = map(int, l.split(' ', 1))
1930 except ValueError, TypeError:
1948 except ValueError, TypeError:
1931 raise util.UnexpectedOutput(
1949 raise util.UnexpectedOutput(
1932 _('Unexpected response from remote server:'), l)
1950 _('Unexpected response from remote server:'), l)
1933 self.ui.status(_('%d files to transfer, %s of data\n') %
1951 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 (total_files, util.bytecount(total_bytes)))
1952 (total_files, util.bytecount(total_bytes)))
1935 start = time.time()
1953 start = time.time()
1936 for i in xrange(total_files):
1954 for i in xrange(total_files):
1937 # XXX doesn't support '\n' or '\r' in filenames
1955 # XXX doesn't support '\n' or '\r' in filenames
1938 l = fp.readline()
1956 l = fp.readline()
1939 try:
1957 try:
1940 name, size = l.split('\0', 1)
1958 name, size = l.split('\0', 1)
1941 size = int(size)
1959 size = int(size)
1942 except ValueError, TypeError:
1960 except ValueError, TypeError:
1943 raise util.UnexpectedOutput(
1961 raise util.UnexpectedOutput(
1944 _('Unexpected response from remote server:'), l)
1962 _('Unexpected response from remote server:'), l)
1945 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1963 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 ofp = self.sopener(name, 'w')
1964 ofp = self.sopener(name, 'w')
1947 for chunk in util.filechunkiter(fp, limit=size):
1965 for chunk in util.filechunkiter(fp, limit=size):
1948 ofp.write(chunk)
1966 ofp.write(chunk)
1949 ofp.close()
1967 ofp.close()
1950 elapsed = time.time() - start
1968 elapsed = time.time() - start
1951 if elapsed <= 0:
1969 if elapsed <= 0:
1952 elapsed = 0.001
1970 elapsed = 0.001
1953 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1971 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 (util.bytecount(total_bytes), elapsed,
1972 (util.bytecount(total_bytes), elapsed,
1955 util.bytecount(total_bytes / elapsed)))
1973 util.bytecount(total_bytes / elapsed)))
1956 self.reload()
1974 self.reload()
1957 return len(self.heads()) + 1
1975 return len(self.heads()) + 1
1958
1976
1959 def clone(self, remote, heads=[], stream=False):
1977 def clone(self, remote, heads=[], stream=False):
1960 '''clone remote repository.
1978 '''clone remote repository.
1961
1979
1962 keyword arguments:
1980 keyword arguments:
1963 heads: list of revs to clone (forces use of pull)
1981 heads: list of revs to clone (forces use of pull)
1964 stream: use streaming clone if possible'''
1982 stream: use streaming clone if possible'''
1965
1983
1966 # now, all clients that can request uncompressed clones can
1984 # now, all clients that can request uncompressed clones can
1967 # read repo formats supported by all servers that can serve
1985 # read repo formats supported by all servers that can serve
1968 # them.
1986 # them.
1969
1987
1970 # if revlog format changes, client will have to check version
1988 # if revlog format changes, client will have to check version
1971 # and format flags on "stream" capability, and use
1989 # and format flags on "stream" capability, and use
1972 # uncompressed only if compatible.
1990 # uncompressed only if compatible.
1973
1991
1974 if stream and not heads and remote.capable('stream'):
1992 if stream and not heads and remote.capable('stream'):
1975 return self.stream_in(remote)
1993 return self.stream_in(remote)
1976 return self.pull(remote, heads)
1994 return self.pull(remote, heads)
1977
1995
1978 # used to avoid circular references so destructors work
1996 # used to avoid circular references so destructors work
1979 def aftertrans(files):
1997 def aftertrans(files):
1980 renamefiles = [tuple(t) for t in files]
1998 renamefiles = [tuple(t) for t in files]
1981 def a():
1999 def a():
1982 for src, dest in renamefiles:
2000 for src, dest in renamefiles:
1983 util.rename(src, dest)
2001 util.rename(src, dest)
1984 return a
2002 return a
1985
2003
1986 def instance(ui, path, create):
2004 def instance(ui, path, create):
1987 return localrepository(ui, util.drop_scheme('file', path), create)
2005 return localrepository(ui, util.drop_scheme('file', path), create)
1988
2006
1989 def islocal(path):
2007 def islocal(path):
1990 return True
2008 return True
@@ -1,53 +1,59
1 # mq patch on an empty repo
1 # mq patch on an empty repo
2 tip: 0
2 tip: 0
3 No .hg/branches.cache
3 No .hg/branches.cache
4 tip: 0
4 tip: 0
5 No .hg/branches.cache
5 No .hg/branches.cache
6
6
7 # some regular revisions
7 # some regular revisions
8 Patch queue now empty
8 Patch queue now empty
9 tip: 1
9 tip: 1
10 features: unnamed
10 3f910abad313ff802d3a23a7529433872df9b3ae 1
11 3f910abad313ff802d3a23a7529433872df9b3ae 1
11 3f910abad313ff802d3a23a7529433872df9b3ae bar
12 3f910abad313ff802d3a23a7529433872df9b3ae bar
12 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
13 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
13
14
14 # add some mq patches
15 # add some mq patches
15 applying p1
16 applying p1
16 Now at: p1
17 Now at: p1
17 tip: 2
18 tip: 2
19 features: unnamed
18 3f910abad313ff802d3a23a7529433872df9b3ae 1
20 3f910abad313ff802d3a23a7529433872df9b3ae 1
19 3f910abad313ff802d3a23a7529433872df9b3ae bar
21 3f910abad313ff802d3a23a7529433872df9b3ae bar
20 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
22 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
21 tip: 3
23 tip: 3
24 features: unnamed
22 3f910abad313ff802d3a23a7529433872df9b3ae 1
25 3f910abad313ff802d3a23a7529433872df9b3ae 1
23 3f910abad313ff802d3a23a7529433872df9b3ae bar
26 3f910abad313ff802d3a23a7529433872df9b3ae bar
24 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
27 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
25 branch foo: 3
28 branch foo: 3
26 branch bar: 2
29 branch bar: 2
27
30
28 # removing the cache
31 # removing the cache
29 tip: 3
32 tip: 3
33 features: unnamed
30 3f910abad313ff802d3a23a7529433872df9b3ae 1
34 3f910abad313ff802d3a23a7529433872df9b3ae 1
31 3f910abad313ff802d3a23a7529433872df9b3ae bar
35 3f910abad313ff802d3a23a7529433872df9b3ae bar
32 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
36 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
33 branch foo: 3
37 branch foo: 3
34 branch bar: 2
38 branch bar: 2
35
39
36 # importing rev 1 (the cache now ends in one of the patches)
40 # importing rev 1 (the cache now ends in one of the patches)
37 tip: 3
41 tip: 3
42 features: unnamed
38 3f910abad313ff802d3a23a7529433872df9b3ae 1
43 3f910abad313ff802d3a23a7529433872df9b3ae 1
39 3f910abad313ff802d3a23a7529433872df9b3ae bar
44 3f910abad313ff802d3a23a7529433872df9b3ae bar
40 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
45 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
41 branch foo: 3
46 branch foo: 3
42 branch bar: 2
47 branch bar: 2
43 qbase: 1
48 qbase: 1
44
49
45 # detect an invalid cache
50 # detect an invalid cache
46 Patch queue now empty
51 Patch queue now empty
47 applying p0
52 applying p0
48 applying p1
53 applying p1
49 applying p2
54 applying p2
50 Now at: p2
55 Now at: p2
51 tip: 3
56 tip: 3
57 features: unnamed
52 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff 0
58 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff 0
53 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
59 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
@@ -1,80 +1,81
1 foo
1 foo
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 foo
3 foo
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 (branch merge, don't forget to commit)
5 (branch merge, don't forget to commit)
6 foo
6 foo
7 changeset: 5:5f8fb06e083e
7 changeset: 5:5f8fb06e083e
8 branch: foo
8 branch: foo
9 tag: tip
9 tag: tip
10 parent: 4:4909a3732169
10 parent: 4:4909a3732169
11 parent: 3:bf1bc2f45e83
11 parent: 3:bf1bc2f45e83
12 user: test
12 user: test
13 date: Mon Jan 12 13:46:40 1970 +0000
13 date: Mon Jan 12 13:46:40 1970 +0000
14 summary: merge
14 summary: merge
15
15
16 changeset: 4:4909a3732169
16 changeset: 4:4909a3732169
17 branch: foo
17 branch: foo
18 parent: 1:b699b1cec9c2
18 parent: 1:b699b1cec9c2
19 user: test
19 user: test
20 date: Mon Jan 12 13:46:40 1970 +0000
20 date: Mon Jan 12 13:46:40 1970 +0000
21 summary: modify a branch
21 summary: modify a branch
22
22
23 changeset: 3:bf1bc2f45e83
23 changeset: 3:bf1bc2f45e83
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: clear branch name
26 summary: clear branch name
27
27
28 changeset: 2:67ec16bde7f1
28 changeset: 2:67ec16bde7f1
29 branch: bar
29 branch: bar
30 user: test
30 user: test
31 date: Mon Jan 12 13:46:40 1970 +0000
31 date: Mon Jan 12 13:46:40 1970 +0000
32 summary: change branch name
32 summary: change branch name
33
33
34 changeset: 1:b699b1cec9c2
34 changeset: 1:b699b1cec9c2
35 branch: foo
35 branch: foo
36 user: test
36 user: test
37 date: Mon Jan 12 13:46:40 1970 +0000
37 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: add branch name
38 summary: add branch name
39
39
40 changeset: 0:be8523e69bf8
40 changeset: 0:be8523e69bf8
41 user: test
41 user: test
42 date: Mon Jan 12 13:46:40 1970 +0000
42 date: Mon Jan 12 13:46:40 1970 +0000
43 summary: initial
43 summary: initial
44
44
45 foo 5:5f8fb06e083e
45 foo 5:5f8fb06e083e
46 3:bf1bc2f45e83
46 3:bf1bc2f45e83
47 bar 2:67ec16bde7f1
47 bar 2:67ec16bde7f1
48 foo
48 foo
49
49
50 bar
50 bar
51 % test for invalid branch cache
51 % test for invalid branch cache
52 rolling back last transaction
52 rolling back last transaction
53 changeset: 4:4909a3732169
53 changeset: 4:4909a3732169
54 branch: foo
54 branch: foo
55 tag: tip
55 tag: tip
56 parent: 1:b699b1cec9c2
56 parent: 1:b699b1cec9c2
57 user: test
57 user: test
58 date: Mon Jan 12 13:46:40 1970 +0000
58 date: Mon Jan 12 13:46:40 1970 +0000
59 summary: modify a branch
59 summary: modify a branch
60
60
61 Invalid branch cache: unknown tip
61 Invalid branch cache: unknown tip
62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
63 branch: foo
63 branch: foo
64 tag: tip
64 tag: tip
65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
66 parent: -1:0000000000000000000000000000000000000000
66 parent: -1:0000000000000000000000000000000000000000
67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
68 user: test
68 user: test
69 date: Mon Jan 12 13:46:40 1970 +0000
69 date: Mon Jan 12 13:46:40 1970 +0000
70 files: a
70 files: a
71 extra: branch=foo
71 extra: branch=foo
72 description:
72 description:
73 modify a branch
73 modify a branch
74
74
75
75
76 4:4909a3732169
76 4:4909a3732169
77 features: unnamed
77 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
78 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
78 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
79 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
79 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
80 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
80 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
81 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
General Comments 0
You need to be logged in to leave comments. Login now