##// END OF EJS Templates
Make sure the changelog mentions files whose flags changed...
Alexis S. L. Carvalho -
r4530:0ac7fee4 default
parent child Browse files
Show More
@@ -1,1956 +1,1965
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.root = os.path.realpath(path)
33 self.root = os.path.realpath(path)
34 self.path = os.path.join(self.root, ".hg")
34 self.path = os.path.join(self.root, ".hg")
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 requirements = ["revlogv1"]
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 if parentui.configbool('format', 'usestore', True):
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements.append("store")
47 requirements.append("store")
48 # create an invalid changelog
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
51 ' dummy changelog to prevent using the old repo layout'
52 )
52 )
53 reqfile = self.opener("requires", "w")
53 reqfile = self.opener("requires", "w")
54 for r in requirements:
54 for r in requirements:
55 reqfile.write("%s\n" % r)
55 reqfile.write("%s\n" % r)
56 reqfile.close()
56 reqfile.close()
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94
94
95 fallback = self.ui.config('ui', 'fallbackencoding')
95 fallback = self.ui.config('ui', 'fallbackencoding')
96 if fallback:
96 if fallback:
97 util._fallbackencoding = fallback
97 util._fallbackencoding = fallback
98
98
99 self.tagscache = None
99 self.tagscache = None
100 self.branchcache = None
100 self.branchcache = None
101 self.nodetagscache = None
101 self.nodetagscache = None
102 self.filterpats = {}
102 self.filterpats = {}
103 self.transhandle = None
103 self.transhandle = None
104
104
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
371 def _branchtags(self):
371 def _branchtags(self):
372 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
373
373
374 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
375 if lrev != tiprev:
375 if lrev != tiprev:
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378
378
379 return partial
379 return partial
380
380
381 def branchtags(self):
381 def branchtags(self):
382 if self.branchcache is not None:
382 if self.branchcache is not None:
383 return self.branchcache
383 return self.branchcache
384
384
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 partial = self._branchtags()
386 partial = self._branchtags()
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 return self.branchcache
392 return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
428 f.rename()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 pass
430 pass
431
431
432 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
433 for r in xrange(start, end):
434 c = self.changectx(r)
434 c = self.changectx(r)
435 b = c.branch()
435 b = c.branch()
436 partial[b] = c.node()
436 partial[b] = c.node()
437
437
438 def lookup(self, key):
438 def lookup(self, key):
439 if key == '.':
439 if key == '.':
440 key, second = self.dirstate.parents()
440 key, second = self.dirstate.parents()
441 if key == nullid:
441 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
443 if second != nullid:
443 if second != nullid:
444 self.ui.warn(_("warning: working directory has two parents, "
444 self.ui.warn(_("warning: working directory has two parents, "
445 "tag '.' uses the first\n"))
445 "tag '.' uses the first\n"))
446 elif key == 'null':
446 elif key == 'null':
447 return nullid
447 return nullid
448 n = self.changelog._match(key)
448 n = self.changelog._match(key)
449 if n:
449 if n:
450 return n
450 return n
451 if key in self.tags():
451 if key in self.tags():
452 return self.tags()[key]
452 return self.tags()[key]
453 if key in self.branchtags():
453 if key in self.branchtags():
454 return self.branchtags()[key]
454 return self.branchtags()[key]
455 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
456 if n:
456 if n:
457 return n
457 return n
458 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
459
459
460 def dev(self):
460 def dev(self):
461 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
462
462
463 def local(self):
463 def local(self):
464 return True
464 return True
465
465
466 def join(self, f):
466 def join(self, f):
467 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
468
468
469 def sjoin(self, f):
469 def sjoin(self, f):
470 f = self.encodefn(f)
470 f = self.encodefn(f)
471 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
472
472
473 def wjoin(self, f):
473 def wjoin(self, f):
474 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
475
475
476 def file(self, f):
476 def file(self, f):
477 if f[0] == '/':
477 if f[0] == '/':
478 f = f[1:]
478 f = f[1:]
479 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
480
480
481 def changectx(self, changeid=None):
481 def changectx(self, changeid=None):
482 return context.changectx(self, changeid)
482 return context.changectx(self, changeid)
483
483
484 def workingctx(self):
484 def workingctx(self):
485 return context.workingctx(self)
485 return context.workingctx(self)
486
486
487 def parents(self, changeid=None):
487 def parents(self, changeid=None):
488 '''
488 '''
489 get list of changectxs for parents of changeid or working directory
489 get list of changectxs for parents of changeid or working directory
490 '''
490 '''
491 if changeid is None:
491 if changeid is None:
492 pl = self.dirstate.parents()
492 pl = self.dirstate.parents()
493 else:
493 else:
494 n = self.changelog.lookup(changeid)
494 n = self.changelog.lookup(changeid)
495 pl = self.changelog.parents(n)
495 pl = self.changelog.parents(n)
496 if pl[1] == nullid:
496 if pl[1] == nullid:
497 return [self.changectx(pl[0])]
497 return [self.changectx(pl[0])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499
499
500 def filectx(self, path, changeid=None, fileid=None):
500 def filectx(self, path, changeid=None, fileid=None):
501 """changeid can be a changeset revision, node, or tag.
501 """changeid can be a changeset revision, node, or tag.
502 fileid can be a file revision or node."""
502 fileid can be a file revision or node."""
503 return context.filectx(self, path, changeid, fileid)
503 return context.filectx(self, path, changeid, fileid)
504
504
505 def getcwd(self):
505 def getcwd(self):
506 return self.dirstate.getcwd()
506 return self.dirstate.getcwd()
507
507
508 def pathto(self, f, cwd=None):
508 def pathto(self, f, cwd=None):
509 return self.dirstate.pathto(f, cwd)
509 return self.dirstate.pathto(f, cwd)
510
510
511 def wfile(self, f, mode='r'):
511 def wfile(self, f, mode='r'):
512 return self.wopener(f, mode)
512 return self.wopener(f, mode)
513
513
514 def _link(self, f):
514 def _link(self, f):
515 return os.path.islink(self.wjoin(f))
515 return os.path.islink(self.wjoin(f))
516
516
517 def _filter(self, filter, filename, data):
517 def _filter(self, filter, filename, data):
518 if filter not in self.filterpats:
518 if filter not in self.filterpats:
519 l = []
519 l = []
520 for pat, cmd in self.ui.configitems(filter):
520 for pat, cmd in self.ui.configitems(filter):
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 l.append((mf, cmd))
522 l.append((mf, cmd))
523 self.filterpats[filter] = l
523 self.filterpats[filter] = l
524
524
525 for mf, cmd in self.filterpats[filter]:
525 for mf, cmd in self.filterpats[filter]:
526 if mf(filename):
526 if mf(filename):
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 data = util.filter(data, cmd)
528 data = util.filter(data, cmd)
529 break
529 break
530
530
531 return data
531 return data
532
532
533 def wread(self, filename):
533 def wread(self, filename):
534 if self._link(filename):
534 if self._link(filename):
535 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
536 else:
536 else:
537 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
538 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
539
539
540 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
541 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
542 if "l" in flags:
542 if "l" in flags:
543 f = self.wjoin(filename)
543 f = self.wjoin(filename)
544 try:
544 try:
545 os.unlink(f)
545 os.unlink(f)
546 except OSError:
546 except OSError:
547 pass
547 pass
548 d = os.path.dirname(f)
548 d = os.path.dirname(f)
549 if not os.path.exists(d):
549 if not os.path.exists(d):
550 os.makedirs(d)
550 os.makedirs(d)
551 os.symlink(data, f)
551 os.symlink(data, f)
552 else:
552 else:
553 try:
553 try:
554 if self._link(filename):
554 if self._link(filename):
555 os.unlink(self.wjoin(filename))
555 os.unlink(self.wjoin(filename))
556 except OSError:
556 except OSError:
557 pass
557 pass
558 self.wopener(filename, 'w').write(data)
558 self.wopener(filename, 'w').write(data)
559 util.set_exec(self.wjoin(filename), "x" in flags)
559 util.set_exec(self.wjoin(filename), "x" in flags)
560
560
561 def wwritedata(self, filename, data):
561 def wwritedata(self, filename, data):
562 return self._filter("decode", filename, data)
562 return self._filter("decode", filename, data)
563
563
564 def transaction(self):
564 def transaction(self):
565 tr = self.transhandle
565 tr = self.transhandle
566 if tr != None and tr.running():
566 if tr != None and tr.running():
567 return tr.nest()
567 return tr.nest()
568
568
569 # save dirstate for rollback
569 # save dirstate for rollback
570 try:
570 try:
571 ds = self.opener("dirstate").read()
571 ds = self.opener("dirstate").read()
572 except IOError:
572 except IOError:
573 ds = ""
573 ds = ""
574 self.opener("journal.dirstate", "w").write(ds)
574 self.opener("journal.dirstate", "w").write(ds)
575
575
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames))
580 aftertrans(renames))
581 self.transhandle = tr
581 self.transhandle = tr
582 return tr
582 return tr
583
583
584 def recover(self):
584 def recover(self):
585 l = self.lock()
585 l = self.lock()
586 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
587 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 self.reload()
589 self.reload()
590 return True
590 return True
591 else:
591 else:
592 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
593 return False
593 return False
594
594
595 def rollback(self, wlock=None, lock=None):
595 def rollback(self, wlock=None, lock=None):
596 if not wlock:
596 if not wlock:
597 wlock = self.wlock()
597 wlock = self.wlock()
598 if not lock:
598 if not lock:
599 lock = self.lock()
599 lock = self.lock()
600 if os.path.exists(self.sjoin("undo")):
600 if os.path.exists(self.sjoin("undo")):
601 self.ui.status(_("rolling back last transaction\n"))
601 self.ui.status(_("rolling back last transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 self.reload()
604 self.reload()
605 self.wreload()
605 self.wreload()
606 else:
606 else:
607 self.ui.warn(_("no rollback information available\n"))
607 self.ui.warn(_("no rollback information available\n"))
608
608
609 def wreload(self):
609 def wreload(self):
610 self.dirstate.reload()
610 self.dirstate.reload()
611
611
612 def reload(self):
612 def reload(self):
613 self.changelog.load()
613 self.changelog.load()
614 self.manifest.load()
614 self.manifest.load()
615 self.tagscache = None
615 self.tagscache = None
616 self.nodetagscache = None
616 self.nodetagscache = None
617
617
618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 desc=None):
619 desc=None):
620 try:
620 try:
621 l = lock.lock(lockname, 0, releasefn, desc=desc)
621 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 except lock.LockHeld, inst:
622 except lock.LockHeld, inst:
623 if not wait:
623 if not wait:
624 raise
624 raise
625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 (desc, inst.locker))
626 (desc, inst.locker))
627 # default to 600 seconds timeout
627 # default to 600 seconds timeout
628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 releasefn, desc=desc)
629 releasefn, desc=desc)
630 if acquirefn:
630 if acquirefn:
631 acquirefn()
631 acquirefn()
632 return l
632 return l
633
633
634 def lock(self, wait=1):
634 def lock(self, wait=1):
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
636 desc=_('repository %s') % self.origroot)
636 desc=_('repository %s') % self.origroot)
637
637
638 def wlock(self, wait=1):
638 def wlock(self, wait=1):
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 self.wreload,
640 self.wreload,
641 desc=_('working directory of %s') % self.origroot)
641 desc=_('working directory of %s') % self.origroot)
642
642
643 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
643 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
644 """
644 """
645 commit an individual file as part of a larger transaction
645 commit an individual file as part of a larger transaction
646 """
646 """
647
647
648 t = self.wread(fn)
648 t = self.wread(fn)
649 fl = self.file(fn)
649 fl = self.file(fn)
650 fp1 = manifest1.get(fn, nullid)
650 fp1 = manifest1.get(fn, nullid)
651 fp2 = manifest2.get(fn, nullid)
651 fp2 = manifest2.get(fn, nullid)
652
652
653 meta = {}
653 meta = {}
654 cp = self.dirstate.copied(fn)
654 cp = self.dirstate.copied(fn)
655 if cp:
655 if cp:
656 # Mark the new revision of this file as a copy of another
656 # Mark the new revision of this file as a copy of another
657 # file. This copy data will effectively act as a parent
657 # file. This copy data will effectively act as a parent
658 # of this new revision. If this is a merge, the first
658 # of this new revision. If this is a merge, the first
659 # parent will be the nullid (meaning "look up the copy data")
659 # parent will be the nullid (meaning "look up the copy data")
660 # and the second one will be the other parent. For example:
660 # and the second one will be the other parent. For example:
661 #
661 #
662 # 0 --- 1 --- 3 rev1 changes file foo
662 # 0 --- 1 --- 3 rev1 changes file foo
663 # \ / rev2 renames foo to bar and changes it
663 # \ / rev2 renames foo to bar and changes it
664 # \- 2 -/ rev3 should have bar with all changes and
664 # \- 2 -/ rev3 should have bar with all changes and
665 # should record that bar descends from
665 # should record that bar descends from
666 # bar in rev2 and foo in rev1
666 # bar in rev2 and foo in rev1
667 #
667 #
668 # this allows this merge to succeed:
668 # this allows this merge to succeed:
669 #
669 #
670 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
670 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
671 # \ / merging rev3 and rev4 should use bar@rev2
671 # \ / merging rev3 and rev4 should use bar@rev2
672 # \- 2 --- 4 as the merge base
672 # \- 2 --- 4 as the merge base
673 #
673 #
674 meta["copy"] = cp
674 meta["copy"] = cp
675 if not manifest2: # not a branch merge
675 if not manifest2: # not a branch merge
676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
677 fp2 = nullid
677 fp2 = nullid
678 elif fp2 != nullid: # copied on remote side
678 elif fp2 != nullid: # copied on remote side
679 meta["copyrev"] = hex(manifest1.get(cp, nullid))
679 meta["copyrev"] = hex(manifest1.get(cp, nullid))
680 elif fp1 != nullid: # copied on local side, reversed
680 elif fp1 != nullid: # copied on local side, reversed
681 meta["copyrev"] = hex(manifest2.get(cp))
681 meta["copyrev"] = hex(manifest2.get(cp))
682 fp2 = fp1
682 fp2 = fp1
683 else: # directory rename
683 else: # directory rename
684 meta["copyrev"] = hex(manifest1.get(cp, nullid))
684 meta["copyrev"] = hex(manifest1.get(cp, nullid))
685 self.ui.debug(_(" %s: copy %s:%s\n") %
685 self.ui.debug(_(" %s: copy %s:%s\n") %
686 (fn, cp, meta["copyrev"]))
686 (fn, cp, meta["copyrev"]))
687 fp1 = nullid
687 fp1 = nullid
688 elif fp2 != nullid:
688 elif fp2 != nullid:
689 # is one parent an ancestor of the other?
689 # is one parent an ancestor of the other?
690 fpa = fl.ancestor(fp1, fp2)
690 fpa = fl.ancestor(fp1, fp2)
691 if fpa == fp1:
691 if fpa == fp1:
692 fp1, fp2 = fp2, nullid
692 fp1, fp2 = fp2, nullid
693 elif fpa == fp2:
693 elif fpa == fp2:
694 fp2 = nullid
694 fp2 = nullid
695
695
696 # is the file unmodified from the parent? report existing entry
696 # is the file unmodified from the parent? report existing entry
697 if fp2 == nullid and not fl.cmp(fp1, t):
697 if fp2 == nullid and not fl.cmp(fp1, t):
698 return fp1
698 return fp1
699
699
700 changelist.append(fn)
700 changelist.append(fn)
701 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
701 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702
702
703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
704 if p1 is None:
704 if p1 is None:
705 p1, p2 = self.dirstate.parents()
705 p1, p2 = self.dirstate.parents()
706 return self.commit(files=files, text=text, user=user, date=date,
706 return self.commit(files=files, text=text, user=user, date=date,
707 p1=p1, p2=p2, wlock=wlock, extra=extra)
707 p1=p1, p2=p2, wlock=wlock, extra=extra)
708
708
709 def commit(self, files=None, text="", user=None, date=None,
709 def commit(self, files=None, text="", user=None, date=None,
710 match=util.always, force=False, lock=None, wlock=None,
710 match=util.always, force=False, lock=None, wlock=None,
711 force_editor=False, p1=None, p2=None, extra={}):
711 force_editor=False, p1=None, p2=None, extra={}):
712
712
713 commit = []
713 commit = []
714 remove = []
714 remove = []
715 changed = []
715 changed = []
716 use_dirstate = (p1 is None) # not rawcommit
716 use_dirstate = (p1 is None) # not rawcommit
717 extra = extra.copy()
717 extra = extra.copy()
718
718
719 if use_dirstate:
719 if use_dirstate:
720 if files:
720 if files:
721 for f in files:
721 for f in files:
722 s = self.dirstate.state(f)
722 s = self.dirstate.state(f)
723 if s in 'nmai':
723 if s in 'nmai':
724 commit.append(f)
724 commit.append(f)
725 elif s == 'r':
725 elif s == 'r':
726 remove.append(f)
726 remove.append(f)
727 else:
727 else:
728 self.ui.warn(_("%s not tracked!\n") % f)
728 self.ui.warn(_("%s not tracked!\n") % f)
729 else:
729 else:
730 changes = self.status(match=match)[:5]
730 changes = self.status(match=match)[:5]
731 modified, added, removed, deleted, unknown = changes
731 modified, added, removed, deleted, unknown = changes
732 commit = modified + added
732 commit = modified + added
733 remove = removed
733 remove = removed
734 else:
734 else:
735 commit = files
735 commit = files
736
736
737 if use_dirstate:
737 if use_dirstate:
738 p1, p2 = self.dirstate.parents()
738 p1, p2 = self.dirstate.parents()
739 update_dirstate = True
739 update_dirstate = True
740 else:
740 else:
741 p1, p2 = p1, p2 or nullid
741 p1, p2 = p1, p2 or nullid
742 update_dirstate = (self.dirstate.parents()[0] == p1)
742 update_dirstate = (self.dirstate.parents()[0] == p1)
743
743
744 c1 = self.changelog.read(p1)
744 c1 = self.changelog.read(p1)
745 c2 = self.changelog.read(p2)
745 c2 = self.changelog.read(p2)
746 m1 = self.manifest.read(c1[0]).copy()
746 m1 = self.manifest.read(c1[0]).copy()
747 m2 = self.manifest.read(c2[0])
747 m2 = self.manifest.read(c2[0])
748
748
749 if use_dirstate:
749 if use_dirstate:
750 branchname = self.workingctx().branch()
750 branchname = self.workingctx().branch()
751 try:
751 try:
752 branchname = branchname.decode('UTF-8').encode('UTF-8')
752 branchname = branchname.decode('UTF-8').encode('UTF-8')
753 except UnicodeDecodeError:
753 except UnicodeDecodeError:
754 raise util.Abort(_('branch name not in UTF-8!'))
754 raise util.Abort(_('branch name not in UTF-8!'))
755 else:
755 else:
756 branchname = ""
756 branchname = ""
757
757
758 if use_dirstate:
758 if use_dirstate:
759 oldname = c1[5].get("branch") # stored in UTF-8
759 oldname = c1[5].get("branch") # stored in UTF-8
760 if not commit and not remove and not force and p2 == nullid and \
760 if not commit and not remove and not force and p2 == nullid and \
761 branchname == oldname:
761 branchname == oldname:
762 self.ui.status(_("nothing changed\n"))
762 self.ui.status(_("nothing changed\n"))
763 return None
763 return None
764
764
765 xp1 = hex(p1)
765 xp1 = hex(p1)
766 if p2 == nullid: xp2 = ''
766 if p2 == nullid: xp2 = ''
767 else: xp2 = hex(p2)
767 else: xp2 = hex(p2)
768
768
769 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
769 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
770
770
771 if not wlock:
771 if not wlock:
772 wlock = self.wlock()
772 wlock = self.wlock()
773 if not lock:
773 if not lock:
774 lock = self.lock()
774 lock = self.lock()
775 tr = self.transaction()
775 tr = self.transaction()
776
776
777 # check in files
777 # check in files
778 new = {}
778 new = {}
779 linkrev = self.changelog.count()
779 linkrev = self.changelog.count()
780 commit.sort()
780 commit.sort()
781 is_exec = util.execfunc(self.root, m1.execf)
781 is_exec = util.execfunc(self.root, m1.execf)
782 is_link = util.linkfunc(self.root, m1.linkf)
782 is_link = util.linkfunc(self.root, m1.linkf)
783 for f in commit:
783 for f in commit:
784 self.ui.note(f + "\n")
784 self.ui.note(f + "\n")
785 try:
785 try:
786 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
786 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
787 m1.set(f, is_exec(f), is_link(f))
787 new_exec = is_exec(f)
788 new_link = is_link(f)
789 if not changed or changed[-1] != f:
790 # mention the file in the changelog if some flag changed,
791 # even if there was no content change.
792 old_exec = m1.execf(f)
793 old_link = m1.linkf(f)
794 if old_exec != new_exec or old_link != new_link:
795 changed.append(f)
796 m1.set(f, new_exec, new_link)
788 except (OSError, IOError):
797 except (OSError, IOError):
789 if use_dirstate:
798 if use_dirstate:
790 self.ui.warn(_("trouble committing %s!\n") % f)
799 self.ui.warn(_("trouble committing %s!\n") % f)
791 raise
800 raise
792 else:
801 else:
793 remove.append(f)
802 remove.append(f)
794
803
795 # update manifest
804 # update manifest
796 m1.update(new)
805 m1.update(new)
797 remove.sort()
806 remove.sort()
798 removed = []
807 removed = []
799
808
800 for f in remove:
809 for f in remove:
801 if f in m1:
810 if f in m1:
802 del m1[f]
811 del m1[f]
803 removed.append(f)
812 removed.append(f)
804 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
813 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
805
814
806 # add changeset
815 # add changeset
807 new = new.keys()
816 new = new.keys()
808 new.sort()
817 new.sort()
809
818
810 user = user or self.ui.username()
819 user = user or self.ui.username()
811 if not text or force_editor:
820 if not text or force_editor:
812 edittext = []
821 edittext = []
813 if text:
822 if text:
814 edittext.append(text)
823 edittext.append(text)
815 edittext.append("")
824 edittext.append("")
816 edittext.append("HG: user: %s" % user)
825 edittext.append("HG: user: %s" % user)
817 if p2 != nullid:
826 if p2 != nullid:
818 edittext.append("HG: branch merge")
827 edittext.append("HG: branch merge")
819 if branchname:
828 if branchname:
820 edittext.append("HG: branch %s" % util.tolocal(branchname))
829 edittext.append("HG: branch %s" % util.tolocal(branchname))
821 edittext.extend(["HG: changed %s" % f for f in changed])
830 edittext.extend(["HG: changed %s" % f for f in changed])
822 edittext.extend(["HG: removed %s" % f for f in removed])
831 edittext.extend(["HG: removed %s" % f for f in removed])
823 if not changed and not remove:
832 if not changed and not remove:
824 edittext.append("HG: no files changed")
833 edittext.append("HG: no files changed")
825 edittext.append("")
834 edittext.append("")
826 # run editor in the repository root
835 # run editor in the repository root
827 olddir = os.getcwd()
836 olddir = os.getcwd()
828 os.chdir(self.root)
837 os.chdir(self.root)
829 text = self.ui.edit("\n".join(edittext), user)
838 text = self.ui.edit("\n".join(edittext), user)
830 os.chdir(olddir)
839 os.chdir(olddir)
831
840
832 lines = [line.rstrip() for line in text.rstrip().splitlines()]
841 lines = [line.rstrip() for line in text.rstrip().splitlines()]
833 while lines and not lines[0]:
842 while lines and not lines[0]:
834 del lines[0]
843 del lines[0]
835 if not lines:
844 if not lines:
836 return None
845 return None
837 text = '\n'.join(lines)
846 text = '\n'.join(lines)
838 if branchname:
847 if branchname:
839 extra["branch"] = branchname
848 extra["branch"] = branchname
840 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
849 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
841 user, date, extra)
850 user, date, extra)
842 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
851 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
843 parent2=xp2)
852 parent2=xp2)
844 tr.close()
853 tr.close()
845
854
846 if self.branchcache and "branch" in extra:
855 if self.branchcache and "branch" in extra:
847 self.branchcache[util.tolocal(extra["branch"])] = n
856 self.branchcache[util.tolocal(extra["branch"])] = n
848
857
849 if use_dirstate or update_dirstate:
858 if use_dirstate or update_dirstate:
850 self.dirstate.setparents(n)
859 self.dirstate.setparents(n)
851 if use_dirstate:
860 if use_dirstate:
852 self.dirstate.update(new, "n")
861 self.dirstate.update(new, "n")
853 self.dirstate.forget(removed)
862 self.dirstate.forget(removed)
854
863
855 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
864 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
856 return n
865 return n
857
866
858 def walk(self, node=None, files=[], match=util.always, badmatch=None):
867 def walk(self, node=None, files=[], match=util.always, badmatch=None):
859 '''
868 '''
860 walk recursively through the directory tree or a given
869 walk recursively through the directory tree or a given
861 changeset, finding all files matched by the match
870 changeset, finding all files matched by the match
862 function
871 function
863
872
864 results are yielded in a tuple (src, filename), where src
873 results are yielded in a tuple (src, filename), where src
865 is one of:
874 is one of:
866 'f' the file was found in the directory tree
875 'f' the file was found in the directory tree
867 'm' the file was only in the dirstate and not in the tree
876 'm' the file was only in the dirstate and not in the tree
868 'b' file was not found and matched badmatch
877 'b' file was not found and matched badmatch
869 '''
878 '''
870
879
871 if node:
880 if node:
872 fdict = dict.fromkeys(files)
881 fdict = dict.fromkeys(files)
873 # for dirstate.walk, files=['.'] means "walk the whole tree".
882 # for dirstate.walk, files=['.'] means "walk the whole tree".
874 # follow that here, too
883 # follow that here, too
875 fdict.pop('.', None)
884 fdict.pop('.', None)
876 mdict = self.manifest.read(self.changelog.read(node)[0])
885 mdict = self.manifest.read(self.changelog.read(node)[0])
877 mfiles = mdict.keys()
886 mfiles = mdict.keys()
878 mfiles.sort()
887 mfiles.sort()
879 for fn in mfiles:
888 for fn in mfiles:
880 for ffn in fdict:
889 for ffn in fdict:
881 # match if the file is the exact name or a directory
890 # match if the file is the exact name or a directory
882 if ffn == fn or fn.startswith("%s/" % ffn):
891 if ffn == fn or fn.startswith("%s/" % ffn):
883 del fdict[ffn]
892 del fdict[ffn]
884 break
893 break
885 if match(fn):
894 if match(fn):
886 yield 'm', fn
895 yield 'm', fn
887 ffiles = fdict.keys()
896 ffiles = fdict.keys()
888 ffiles.sort()
897 ffiles.sort()
889 for fn in ffiles:
898 for fn in ffiles:
890 if badmatch and badmatch(fn):
899 if badmatch and badmatch(fn):
891 if match(fn):
900 if match(fn):
892 yield 'b', fn
901 yield 'b', fn
893 else:
902 else:
894 self.ui.warn(_('%s: No such file in rev %s\n')
903 self.ui.warn(_('%s: No such file in rev %s\n')
895 % (self.pathto(fn), short(node)))
904 % (self.pathto(fn), short(node)))
896 else:
905 else:
897 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
906 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
898 yield src, fn
907 yield src, fn
899
908
900 def status(self, node1=None, node2=None, files=[], match=util.always,
909 def status(self, node1=None, node2=None, files=[], match=util.always,
901 wlock=None, list_ignored=False, list_clean=False):
910 wlock=None, list_ignored=False, list_clean=False):
902 """return status of files between two nodes or node and working directory
911 """return status of files between two nodes or node and working directory
903
912
904 If node1 is None, use the first dirstate parent instead.
913 If node1 is None, use the first dirstate parent instead.
905 If node2 is None, compare node1 with working directory.
914 If node2 is None, compare node1 with working directory.
906 """
915 """
907
916
908 def fcmp(fn, getnode):
917 def fcmp(fn, getnode):
909 t1 = self.wread(fn)
918 t1 = self.wread(fn)
910 return self.file(fn).cmp(getnode(fn), t1)
919 return self.file(fn).cmp(getnode(fn), t1)
911
920
912 def mfmatches(node):
921 def mfmatches(node):
913 change = self.changelog.read(node)
922 change = self.changelog.read(node)
914 mf = self.manifest.read(change[0]).copy()
923 mf = self.manifest.read(change[0]).copy()
915 for fn in mf.keys():
924 for fn in mf.keys():
916 if not match(fn):
925 if not match(fn):
917 del mf[fn]
926 del mf[fn]
918 return mf
927 return mf
919
928
920 modified, added, removed, deleted, unknown = [], [], [], [], []
929 modified, added, removed, deleted, unknown = [], [], [], [], []
921 ignored, clean = [], []
930 ignored, clean = [], []
922
931
923 compareworking = False
932 compareworking = False
924 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
933 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
925 compareworking = True
934 compareworking = True
926
935
927 if not compareworking:
936 if not compareworking:
928 # read the manifest from node1 before the manifest from node2,
937 # read the manifest from node1 before the manifest from node2,
929 # so that we'll hit the manifest cache if we're going through
938 # so that we'll hit the manifest cache if we're going through
930 # all the revisions in parent->child order.
939 # all the revisions in parent->child order.
931 mf1 = mfmatches(node1)
940 mf1 = mfmatches(node1)
932
941
933 mywlock = False
942 mywlock = False
934
943
935 # are we comparing the working directory?
944 # are we comparing the working directory?
936 if not node2:
945 if not node2:
937 (lookup, modified, added, removed, deleted, unknown,
946 (lookup, modified, added, removed, deleted, unknown,
938 ignored, clean) = self.dirstate.status(files, match,
947 ignored, clean) = self.dirstate.status(files, match,
939 list_ignored, list_clean)
948 list_ignored, list_clean)
940
949
941 # are we comparing working dir against its parent?
950 # are we comparing working dir against its parent?
942 if compareworking:
951 if compareworking:
943 if lookup:
952 if lookup:
944 # do a full compare of any files that might have changed
953 # do a full compare of any files that might have changed
945 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
954 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
946 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
955 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
947 nullid)
956 nullid)
948 for f in lookup:
957 for f in lookup:
949 if fcmp(f, getnode):
958 if fcmp(f, getnode):
950 modified.append(f)
959 modified.append(f)
951 else:
960 else:
952 clean.append(f)
961 clean.append(f)
953 if not wlock and not mywlock:
962 if not wlock and not mywlock:
954 mywlock = True
963 mywlock = True
955 try:
964 try:
956 wlock = self.wlock(wait=0)
965 wlock = self.wlock(wait=0)
957 except lock.LockException:
966 except lock.LockException:
958 pass
967 pass
959 if wlock:
968 if wlock:
960 self.dirstate.update([f], "n")
969 self.dirstate.update([f], "n")
961 else:
970 else:
962 # we are comparing working dir against non-parent
971 # we are comparing working dir against non-parent
963 # generate a pseudo-manifest for the working dir
972 # generate a pseudo-manifest for the working dir
964 # XXX: create it in dirstate.py ?
973 # XXX: create it in dirstate.py ?
965 mf2 = mfmatches(self.dirstate.parents()[0])
974 mf2 = mfmatches(self.dirstate.parents()[0])
966 is_exec = util.execfunc(self.root, mf2.execf)
975 is_exec = util.execfunc(self.root, mf2.execf)
967 is_link = util.linkfunc(self.root, mf2.linkf)
976 is_link = util.linkfunc(self.root, mf2.linkf)
968 for f in lookup + modified + added:
977 for f in lookup + modified + added:
969 mf2[f] = ""
978 mf2[f] = ""
970 mf2.set(f, is_exec(f), is_link(f))
979 mf2.set(f, is_exec(f), is_link(f))
971 for f in removed:
980 for f in removed:
972 if f in mf2:
981 if f in mf2:
973 del mf2[f]
982 del mf2[f]
974
983
975 if mywlock and wlock:
984 if mywlock and wlock:
976 wlock.release()
985 wlock.release()
977 else:
986 else:
978 # we are comparing two revisions
987 # we are comparing two revisions
979 mf2 = mfmatches(node2)
988 mf2 = mfmatches(node2)
980
989
981 if not compareworking:
990 if not compareworking:
982 # flush lists from dirstate before comparing manifests
991 # flush lists from dirstate before comparing manifests
983 modified, added, clean = [], [], []
992 modified, added, clean = [], [], []
984
993
985 # make sure to sort the files so we talk to the disk in a
994 # make sure to sort the files so we talk to the disk in a
986 # reasonable order
995 # reasonable order
987 mf2keys = mf2.keys()
996 mf2keys = mf2.keys()
988 mf2keys.sort()
997 mf2keys.sort()
989 getnode = lambda fn: mf1.get(fn, nullid)
998 getnode = lambda fn: mf1.get(fn, nullid)
990 for fn in mf2keys:
999 for fn in mf2keys:
991 if mf1.has_key(fn):
1000 if mf1.has_key(fn):
992 if mf1.flags(fn) != mf2.flags(fn) or \
1001 if mf1.flags(fn) != mf2.flags(fn) or \
993 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
1002 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
994 fcmp(fn, getnode))):
1003 fcmp(fn, getnode))):
995 modified.append(fn)
1004 modified.append(fn)
996 elif list_clean:
1005 elif list_clean:
997 clean.append(fn)
1006 clean.append(fn)
998 del mf1[fn]
1007 del mf1[fn]
999 else:
1008 else:
1000 added.append(fn)
1009 added.append(fn)
1001
1010
1002 removed = mf1.keys()
1011 removed = mf1.keys()
1003
1012
1004 # sort and return results:
1013 # sort and return results:
1005 for l in modified, added, removed, deleted, unknown, ignored, clean:
1014 for l in modified, added, removed, deleted, unknown, ignored, clean:
1006 l.sort()
1015 l.sort()
1007 return (modified, added, removed, deleted, unknown, ignored, clean)
1016 return (modified, added, removed, deleted, unknown, ignored, clean)
1008
1017
1009 def add(self, list, wlock=None):
1018 def add(self, list, wlock=None):
1010 if not wlock:
1019 if not wlock:
1011 wlock = self.wlock()
1020 wlock = self.wlock()
1012 for f in list:
1021 for f in list:
1013 p = self.wjoin(f)
1022 p = self.wjoin(f)
1014 islink = os.path.islink(p)
1023 islink = os.path.islink(p)
1015 size = os.lstat(p).st_size
1024 size = os.lstat(p).st_size
1016 if size > 10000000:
1025 if size > 10000000:
1017 self.ui.warn(_("%s: files over 10MB may cause memory and"
1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1018 " performance problems\n"
1027 " performance problems\n"
1019 "(use 'hg revert %s' to unadd the file)\n")
1028 "(use 'hg revert %s' to unadd the file)\n")
1020 % (f, f))
1029 % (f, f))
1021 if not islink and not os.path.exists(p):
1030 if not islink and not os.path.exists(p):
1022 self.ui.warn(_("%s does not exist!\n") % f)
1031 self.ui.warn(_("%s does not exist!\n") % f)
1023 elif not islink and not os.path.isfile(p):
1032 elif not islink and not os.path.isfile(p):
1024 self.ui.warn(_("%s not added: only files and symlinks "
1033 self.ui.warn(_("%s not added: only files and symlinks "
1025 "supported currently\n") % f)
1034 "supported currently\n") % f)
1026 elif self.dirstate.state(f) in 'an':
1035 elif self.dirstate.state(f) in 'an':
1027 self.ui.warn(_("%s already tracked!\n") % f)
1036 self.ui.warn(_("%s already tracked!\n") % f)
1028 else:
1037 else:
1029 self.dirstate.update([f], "a")
1038 self.dirstate.update([f], "a")
1030
1039
1031 def forget(self, list, wlock=None):
1040 def forget(self, list, wlock=None):
1032 if not wlock:
1041 if not wlock:
1033 wlock = self.wlock()
1042 wlock = self.wlock()
1034 for f in list:
1043 for f in list:
1035 if self.dirstate.state(f) not in 'ai':
1044 if self.dirstate.state(f) not in 'ai':
1036 self.ui.warn(_("%s not added!\n") % f)
1045 self.ui.warn(_("%s not added!\n") % f)
1037 else:
1046 else:
1038 self.dirstate.forget([f])
1047 self.dirstate.forget([f])
1039
1048
1040 def remove(self, list, unlink=False, wlock=None):
1049 def remove(self, list, unlink=False, wlock=None):
1041 if unlink:
1050 if unlink:
1042 for f in list:
1051 for f in list:
1043 try:
1052 try:
1044 util.unlink(self.wjoin(f))
1053 util.unlink(self.wjoin(f))
1045 except OSError, inst:
1054 except OSError, inst:
1046 if inst.errno != errno.ENOENT:
1055 if inst.errno != errno.ENOENT:
1047 raise
1056 raise
1048 if not wlock:
1057 if not wlock:
1049 wlock = self.wlock()
1058 wlock = self.wlock()
1050 for f in list:
1059 for f in list:
1051 if unlink and os.path.exists(self.wjoin(f)):
1060 if unlink and os.path.exists(self.wjoin(f)):
1052 self.ui.warn(_("%s still exists!\n") % f)
1061 self.ui.warn(_("%s still exists!\n") % f)
1053 elif self.dirstate.state(f) == 'a':
1062 elif self.dirstate.state(f) == 'a':
1054 self.dirstate.forget([f])
1063 self.dirstate.forget([f])
1055 elif f not in self.dirstate:
1064 elif f not in self.dirstate:
1056 self.ui.warn(_("%s not tracked!\n") % f)
1065 self.ui.warn(_("%s not tracked!\n") % f)
1057 else:
1066 else:
1058 self.dirstate.update([f], "r")
1067 self.dirstate.update([f], "r")
1059
1068
1060 def undelete(self, list, wlock=None):
1069 def undelete(self, list, wlock=None):
1061 p = self.dirstate.parents()[0]
1070 p = self.dirstate.parents()[0]
1062 mn = self.changelog.read(p)[0]
1071 mn = self.changelog.read(p)[0]
1063 m = self.manifest.read(mn)
1072 m = self.manifest.read(mn)
1064 if not wlock:
1073 if not wlock:
1065 wlock = self.wlock()
1074 wlock = self.wlock()
1066 for f in list:
1075 for f in list:
1067 if self.dirstate.state(f) not in "r":
1076 if self.dirstate.state(f) not in "r":
1068 self.ui.warn("%s not removed!\n" % f)
1077 self.ui.warn("%s not removed!\n" % f)
1069 else:
1078 else:
1070 t = self.file(f).read(m[f])
1079 t = self.file(f).read(m[f])
1071 self.wwrite(f, t, m.flags(f))
1080 self.wwrite(f, t, m.flags(f))
1072 self.dirstate.update([f], "n")
1081 self.dirstate.update([f], "n")
1073
1082
1074 def copy(self, source, dest, wlock=None):
1083 def copy(self, source, dest, wlock=None):
1075 p = self.wjoin(dest)
1084 p = self.wjoin(dest)
1076 if not (os.path.exists(p) or os.path.islink(p)):
1085 if not (os.path.exists(p) or os.path.islink(p)):
1077 self.ui.warn(_("%s does not exist!\n") % dest)
1086 self.ui.warn(_("%s does not exist!\n") % dest)
1078 elif not (os.path.isfile(p) or os.path.islink(p)):
1087 elif not (os.path.isfile(p) or os.path.islink(p)):
1079 self.ui.warn(_("copy failed: %s is not a file or a "
1088 self.ui.warn(_("copy failed: %s is not a file or a "
1080 "symbolic link\n") % dest)
1089 "symbolic link\n") % dest)
1081 else:
1090 else:
1082 if not wlock:
1091 if not wlock:
1083 wlock = self.wlock()
1092 wlock = self.wlock()
1084 if self.dirstate.state(dest) == '?':
1093 if self.dirstate.state(dest) == '?':
1085 self.dirstate.update([dest], "a")
1094 self.dirstate.update([dest], "a")
1086 self.dirstate.copy(source, dest)
1095 self.dirstate.copy(source, dest)
1087
1096
1088 def heads(self, start=None):
1097 def heads(self, start=None):
1089 heads = self.changelog.heads(start)
1098 heads = self.changelog.heads(start)
1090 # sort the output in rev descending order
1099 # sort the output in rev descending order
1091 heads = [(-self.changelog.rev(h), h) for h in heads]
1100 heads = [(-self.changelog.rev(h), h) for h in heads]
1092 heads.sort()
1101 heads.sort()
1093 return [n for (r, n) in heads]
1102 return [n for (r, n) in heads]
1094
1103
1095 def branches(self, nodes):
1104 def branches(self, nodes):
1096 if not nodes:
1105 if not nodes:
1097 nodes = [self.changelog.tip()]
1106 nodes = [self.changelog.tip()]
1098 b = []
1107 b = []
1099 for n in nodes:
1108 for n in nodes:
1100 t = n
1109 t = n
1101 while 1:
1110 while 1:
1102 p = self.changelog.parents(n)
1111 p = self.changelog.parents(n)
1103 if p[1] != nullid or p[0] == nullid:
1112 if p[1] != nullid or p[0] == nullid:
1104 b.append((t, n, p[0], p[1]))
1113 b.append((t, n, p[0], p[1]))
1105 break
1114 break
1106 n = p[0]
1115 n = p[0]
1107 return b
1116 return b
1108
1117
1109 def between(self, pairs):
1118 def between(self, pairs):
1110 r = []
1119 r = []
1111
1120
1112 for top, bottom in pairs:
1121 for top, bottom in pairs:
1113 n, l, i = top, [], 0
1122 n, l, i = top, [], 0
1114 f = 1
1123 f = 1
1115
1124
1116 while n != bottom:
1125 while n != bottom:
1117 p = self.changelog.parents(n)[0]
1126 p = self.changelog.parents(n)[0]
1118 if i == f:
1127 if i == f:
1119 l.append(n)
1128 l.append(n)
1120 f = f * 2
1129 f = f * 2
1121 n = p
1130 n = p
1122 i += 1
1131 i += 1
1123
1132
1124 r.append(l)
1133 r.append(l)
1125
1134
1126 return r
1135 return r
1127
1136
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # Membership in m answers "do we already have this node locally?"
        m = self.changelog.nodemap
        search = []        # (head, root) branch ranges to binary-search later
        fetch = {}         # roots of the missing subsets (the return value)
        seen = {}          # branch heads already examined
        seenbranch = {}    # branch tuples already scheduled for scanning
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # Local repo is empty: everything the remote has is missing.
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # Every remote head is already known locally: nothing missing.
            return []

        req = dict.fromkeys(unknown)   # nodes we have already asked about
        reqcnt = 0                     # round-trip counter, for debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []     # parents to ask the remote about in the next batch
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # Both parents are known locally, so the branch
                            # root is the earliest unknown changeset here.
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # Queue any unknown, not-yet-requested parents.
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # Ask about parents in batches of 10 to bound request size.
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1   # gap between successive samples doubles (see 'between')
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # Adjacent samples: p is the first missing changeset.
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # Narrow the unknown range and rescan it.
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # Only the null revision in common: no shared history at all.
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1268
1277
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
        # findincoming fills base with the nodes common to both sides.
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # Start from every local node, then prune away common history.
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1316
1325
1317 def pull(self, remote, heads=None, force=False, lock=None):
1326 def pull(self, remote, heads=None, force=False, lock=None):
1318 mylock = False
1327 mylock = False
1319 if not lock:
1328 if not lock:
1320 lock = self.lock()
1329 lock = self.lock()
1321 mylock = True
1330 mylock = True
1322
1331
1323 try:
1332 try:
1324 fetch = self.findincoming(remote, force=force)
1333 fetch = self.findincoming(remote, force=force)
1325 if fetch == [nullid]:
1334 if fetch == [nullid]:
1326 self.ui.status(_("requesting all changes\n"))
1335 self.ui.status(_("requesting all changes\n"))
1327
1336
1328 if not fetch:
1337 if not fetch:
1329 self.ui.status(_("no changes found\n"))
1338 self.ui.status(_("no changes found\n"))
1330 return 0
1339 return 0
1331
1340
1332 if heads is None:
1341 if heads is None:
1333 cg = remote.changegroup(fetch, 'pull')
1342 cg = remote.changegroup(fetch, 'pull')
1334 else:
1343 else:
1335 if 'changegroupsubset' not in remote.capabilities:
1344 if 'changegroupsubset' not in remote.capabilities:
1336 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1345 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1337 cg = remote.changegroupsubset(fetch, heads, 'pull')
1346 cg = remote.changegroupsubset(fetch, heads, 'pull')
1338 return self.addchangegroup(cg, 'pull', remote.url())
1347 return self.addchangegroup(cg, 'pull', remote.url())
1339 finally:
1348 finally:
1340 if mylock:
1349 if mylock:
1341 lock.release()
1350 lock.release()
1342
1351
1343 def push(self, remote, force=False, revs=None):
1352 def push(self, remote, force=False, revs=None):
1344 # there are two ways to push to remote repo:
1353 # there are two ways to push to remote repo:
1345 #
1354 #
1346 # addchangegroup assumes local user can lock remote
1355 # addchangegroup assumes local user can lock remote
1347 # repo (local filesystem, old ssh servers).
1356 # repo (local filesystem, old ssh servers).
1348 #
1357 #
1349 # unbundle assumes local user cannot lock remote repo (new ssh
1358 # unbundle assumes local user cannot lock remote repo (new ssh
1350 # servers, http servers).
1359 # servers, http servers).
1351
1360
1352 if remote.capable('unbundle'):
1361 if remote.capable('unbundle'):
1353 return self.push_unbundle(remote, force, revs)
1362 return self.push_unbundle(remote, force, revs)
1354 return self.push_addchangegroup(remote, force, revs)
1363 return self.push_addchangegroup(remote, force, revs)
1355
1364
    def prepush(self, remote, force, revs):
        """Compute the changegroup for a push and sanity-check it.

        Returns a (changegroup, remote_heads) pair ready to be sent, or
        (None, 1) when there is nothing to push or when the push would
        create new remote heads and *force* is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty if the remote has changesets we lack.
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # Restrict the outgoing set to ancestors of the given revs.
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # Remote repo is empty; any heads we push are fine.
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # No outgoing head descends from r, so r
                            # remains a head after the push.
                            newheads.append(r)
                    else:
                        # Remote head unknown locally; it stays a head.
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1411
1420
1412 def push_addchangegroup(self, remote, force, revs):
1421 def push_addchangegroup(self, remote, force, revs):
1413 lock = remote.lock()
1422 lock = remote.lock()
1414
1423
1415 ret = self.prepush(remote, force, revs)
1424 ret = self.prepush(remote, force, revs)
1416 if ret[0] is not None:
1425 if ret[0] is not None:
1417 cg, remote_heads = ret
1426 cg, remote_heads = ret
1418 return remote.addchangegroup(cg, 'push', self.url())
1427 return remote.addchangegroup(cg, 'push', self.url())
1419 return ret[1]
1428 return ret[1]
1420
1429
1421 def push_unbundle(self, remote, force, revs):
1430 def push_unbundle(self, remote, force, revs):
1422 # local repo finds heads on server, finds out what revs it
1431 # local repo finds heads on server, finds out what revs it
1423 # must push. once revs transferred, if server finds it has
1432 # must push. once revs transferred, if server finds it has
1424 # different heads (someone else won commit/push race), server
1433 # different heads (someone else won commit/push race), server
1425 # aborts.
1434 # aborts.
1426
1435
1427 ret = self.prepush(remote, force, revs)
1436 ret = self.prepush(remote, force, revs)
1428 if ret[0] is not None:
1437 if ret[0] is not None:
1429 cg, remote_heads = ret
1438 cg, remote_heads = ret
1430 if force: remote_heads = ['force']
1439 if force: remote_heads = ['force']
1431 return remote.unbundle(cg, remote_heads, 'push')
1440 return remote.unbundle(cg, remote_heads, 'push')
1432 return ret[1]
1441 return ret[1]
1433
1442
1434 def changegroupinfo(self, nodes):
1443 def changegroupinfo(self, nodes):
1435 self.ui.note(_("%d changesets found\n") % len(nodes))
1444 self.ui.note(_("%d changesets found\n") % len(nodes))
1436 if self.ui.debugflag:
1445 if self.ui.debugflag:
1437 self.ui.debug(_("List of changesets:\n"))
1446 self.ui.debug(_("List of changesets:\n"))
1438 for node in nodes:
1447 for node in nodes:
1439 self.ui.debug("%s\n" % hex(node))
1448 self.ui.debug("%s\n" % hex(node))
1440
1449
1441 def changegroupsubset(self, bases, heads, source):
1450 def changegroupsubset(self, bases, heads, source):
1442 """This function generates a changegroup consisting of all the nodes
1451 """This function generates a changegroup consisting of all the nodes
1443 that are descendents of any of the bases, and ancestors of any of
1452 that are descendents of any of the bases, and ancestors of any of
1444 the heads.
1453 the heads.
1445
1454
1446 It is fairly complex as determining which filenodes and which
1455 It is fairly complex as determining which filenodes and which
1447 manifest nodes need to be included for the changeset to be complete
1456 manifest nodes need to be included for the changeset to be complete
1448 is non-trivial.
1457 is non-trivial.
1449
1458
1450 Another wrinkle is doing the reverse, figuring out which changeset in
1459 Another wrinkle is doing the reverse, figuring out which changeset in
1451 the changegroup a particular filenode or manifestnode belongs to."""
1460 the changegroup a particular filenode or manifestnode belongs to."""
1452
1461
1453 self.hook('preoutgoing', throw=True, source=source)
1462 self.hook('preoutgoing', throw=True, source=source)
1454
1463
1455 # Set up some initial variables
1464 # Set up some initial variables
1456 # Make it easy to refer to self.changelog
1465 # Make it easy to refer to self.changelog
1457 cl = self.changelog
1466 cl = self.changelog
1458 # msng is short for missing - compute the list of changesets in this
1467 # msng is short for missing - compute the list of changesets in this
1459 # changegroup.
1468 # changegroup.
1460 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1469 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1461 self.changegroupinfo(msng_cl_lst)
1470 self.changegroupinfo(msng_cl_lst)
1462 # Some bases may turn out to be superfluous, and some heads may be
1471 # Some bases may turn out to be superfluous, and some heads may be
1463 # too. nodesbetween will return the minimal set of bases and heads
1472 # too. nodesbetween will return the minimal set of bases and heads
1464 # necessary to re-create the changegroup.
1473 # necessary to re-create the changegroup.
1465
1474
1466 # Known heads are the list of heads that it is assumed the recipient
1475 # Known heads are the list of heads that it is assumed the recipient
1467 # of this changegroup will know about.
1476 # of this changegroup will know about.
1468 knownheads = {}
1477 knownheads = {}
1469 # We assume that all parents of bases are known heads.
1478 # We assume that all parents of bases are known heads.
1470 for n in bases:
1479 for n in bases:
1471 for p in cl.parents(n):
1480 for p in cl.parents(n):
1472 if p != nullid:
1481 if p != nullid:
1473 knownheads[p] = 1
1482 knownheads[p] = 1
1474 knownheads = knownheads.keys()
1483 knownheads = knownheads.keys()
1475 if knownheads:
1484 if knownheads:
1476 # Now that we know what heads are known, we can compute which
1485 # Now that we know what heads are known, we can compute which
1477 # changesets are known. The recipient must know about all
1486 # changesets are known. The recipient must know about all
1478 # changesets required to reach the known heads from the null
1487 # changesets required to reach the known heads from the null
1479 # changeset.
1488 # changeset.
1480 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1489 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1481 junk = None
1490 junk = None
1482 # Transform the list into an ersatz set.
1491 # Transform the list into an ersatz set.
1483 has_cl_set = dict.fromkeys(has_cl_set)
1492 has_cl_set = dict.fromkeys(has_cl_set)
1484 else:
1493 else:
1485 # If there were no known heads, the recipient cannot be assumed to
1494 # If there were no known heads, the recipient cannot be assumed to
1486 # know about any changesets.
1495 # know about any changesets.
1487 has_cl_set = {}
1496 has_cl_set = {}
1488
1497
1489 # Make it easy to refer to self.manifest
1498 # Make it easy to refer to self.manifest
1490 mnfst = self.manifest
1499 mnfst = self.manifest
1491 # We don't know which manifests are missing yet
1500 # We don't know which manifests are missing yet
1492 msng_mnfst_set = {}
1501 msng_mnfst_set = {}
1493 # Nor do we know which filenodes are missing.
1502 # Nor do we know which filenodes are missing.
1494 msng_filenode_set = {}
1503 msng_filenode_set = {}
1495
1504
1496 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1505 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1497 junk = None
1506 junk = None
1498
1507
1499 # A changeset always belongs to itself, so the changenode lookup
1508 # A changeset always belongs to itself, so the changenode lookup
1500 # function for a changenode is identity.
1509 # function for a changenode is identity.
1501 def identity(x):
1510 def identity(x):
1502 return x
1511 return x
1503
1512
1504 # A function generating function. Sets up an environment for the
1513 # A function generating function. Sets up an environment for the
1505 # inner function.
1514 # inner function.
1506 def cmp_by_rev_func(revlog):
1515 def cmp_by_rev_func(revlog):
1507 # Compare two nodes by their revision number in the environment's
1516 # Compare two nodes by their revision number in the environment's
1508 # revision history. Since the revision number both represents the
1517 # revision history. Since the revision number both represents the
1509 # most efficient order to read the nodes in, and represents a
1518 # most efficient order to read the nodes in, and represents a
1510 # topological sorting of the nodes, this function is often useful.
1519 # topological sorting of the nodes, this function is often useful.
1511 def cmp_by_rev(a, b):
1520 def cmp_by_rev(a, b):
1512 return cmp(revlog.rev(a), revlog.rev(b))
1521 return cmp(revlog.rev(a), revlog.rev(b))
1513 return cmp_by_rev
1522 return cmp_by_rev
1514
1523
1515 # If we determine that a particular file or manifest node must be a
1524 # If we determine that a particular file or manifest node must be a
1516 # node that the recipient of the changegroup will already have, we can
1525 # node that the recipient of the changegroup will already have, we can
1517 # also assume the recipient will have all the parents. This function
1526 # also assume the recipient will have all the parents. This function
1518 # prunes them from the set of missing nodes.
1527 # prunes them from the set of missing nodes.
1519 def prune_parents(revlog, hasset, msngset):
1528 def prune_parents(revlog, hasset, msngset):
1520 haslst = hasset.keys()
1529 haslst = hasset.keys()
1521 haslst.sort(cmp_by_rev_func(revlog))
1530 haslst.sort(cmp_by_rev_func(revlog))
1522 for node in haslst:
1531 for node in haslst:
1523 parentlst = [p for p in revlog.parents(node) if p != nullid]
1532 parentlst = [p for p in revlog.parents(node) if p != nullid]
1524 while parentlst:
1533 while parentlst:
1525 n = parentlst.pop()
1534 n = parentlst.pop()
1526 if n not in hasset:
1535 if n not in hasset:
1527 hasset[n] = 1
1536 hasset[n] = 1
1528 p = [p for p in revlog.parents(n) if p != nullid]
1537 p = [p for p in revlog.parents(n) if p != nullid]
1529 parentlst.extend(p)
1538 parentlst.extend(p)
1530 for n in hasset:
1539 for n in hasset:
1531 msngset.pop(n, None)
1540 msngset.pop(n, None)
1532
1541
1533 # This is a function generating function used to set up an environment
1542 # This is a function generating function used to set up an environment
1534 # for the inner function to execute in.
1543 # for the inner function to execute in.
1535 def manifest_and_file_collector(changedfileset):
1544 def manifest_and_file_collector(changedfileset):
1536 # This is an information gathering function that gathers
1545 # This is an information gathering function that gathers
1537 # information from each changeset node that goes out as part of
1546 # information from each changeset node that goes out as part of
1538 # the changegroup. The information gathered is a list of which
1547 # the changegroup. The information gathered is a list of which
1539 # manifest nodes are potentially required (the recipient may
1548 # manifest nodes are potentially required (the recipient may
1540 # already have them) and total list of all files which were
1549 # already have them) and total list of all files which were
1541 # changed in any changeset in the changegroup.
1550 # changed in any changeset in the changegroup.
1542 #
1551 #
1543 # We also remember the first changenode we saw any manifest
1552 # We also remember the first changenode we saw any manifest
1544 # referenced by so we can later determine which changenode 'owns'
1553 # referenced by so we can later determine which changenode 'owns'
1545 # the manifest.
1554 # the manifest.
1546 def collect_manifests_and_files(clnode):
1555 def collect_manifests_and_files(clnode):
1547 c = cl.read(clnode)
1556 c = cl.read(clnode)
1548 for f in c[3]:
1557 for f in c[3]:
1549 # This is to make sure we only have one instance of each
1558 # This is to make sure we only have one instance of each
1550 # filename string for each filename.
1559 # filename string for each filename.
1551 changedfileset.setdefault(f, f)
1560 changedfileset.setdefault(f, f)
1552 msng_mnfst_set.setdefault(c[0], clnode)
1561 msng_mnfst_set.setdefault(c[0], clnode)
1553 return collect_manifests_and_files
1562 return collect_manifests_and_files
1554
1563
1555 # Figure out which manifest nodes (of the ones we think might be part
1564 # Figure out which manifest nodes (of the ones we think might be part
1556 # of the changegroup) the recipient must know about and remove them
1565 # of the changegroup) the recipient must know about and remove them
1557 # from the changegroup.
1566 # from the changegroup.
1558 def prune_manifests():
1567 def prune_manifests():
1559 has_mnfst_set = {}
1568 has_mnfst_set = {}
1560 for n in msng_mnfst_set:
1569 for n in msng_mnfst_set:
1561 # If a 'missing' manifest thinks it belongs to a changenode
1570 # If a 'missing' manifest thinks it belongs to a changenode
1562 # the recipient is assumed to have, obviously the recipient
1571 # the recipient is assumed to have, obviously the recipient
1563 # must have that manifest.
1572 # must have that manifest.
1564 linknode = cl.node(mnfst.linkrev(n))
1573 linknode = cl.node(mnfst.linkrev(n))
1565 if linknode in has_cl_set:
1574 if linknode in has_cl_set:
1566 has_mnfst_set[n] = 1
1575 has_mnfst_set[n] = 1
1567 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1576 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1568
1577
1569 # Use the information collected in collect_manifests_and_files to say
1578 # Use the information collected in collect_manifests_and_files to say
1570 # which changenode any manifestnode belongs to.
1579 # which changenode any manifestnode belongs to.
1571 def lookup_manifest_link(mnfstnode):
1580 def lookup_manifest_link(mnfstnode):
1572 return msng_mnfst_set[mnfstnode]
1581 return msng_mnfst_set[mnfstnode]
1573
1582
1574 # A function generating function that sets up the initial environment
1583 # A function generating function that sets up the initial environment
1575 # the inner function.
1584 # the inner function.
1576 def filenode_collector(changedfiles):
1585 def filenode_collector(changedfiles):
1577 next_rev = [0]
1586 next_rev = [0]
1578 # This gathers information from each manifestnode included in the
1587 # This gathers information from each manifestnode included in the
1579 # changegroup about which filenodes the manifest node references
1588 # changegroup about which filenodes the manifest node references
1580 # so we can include those in the changegroup too.
1589 # so we can include those in the changegroup too.
1581 #
1590 #
1582 # It also remembers which changenode each filenode belongs to. It
1591 # It also remembers which changenode each filenode belongs to. It
1583 # does this by assuming the a filenode belongs to the changenode
1592 # does this by assuming the a filenode belongs to the changenode
1584 # the first manifest that references it belongs to.
1593 # the first manifest that references it belongs to.
1585 def collect_msng_filenodes(mnfstnode):
1594 def collect_msng_filenodes(mnfstnode):
1586 r = mnfst.rev(mnfstnode)
1595 r = mnfst.rev(mnfstnode)
1587 if r == next_rev[0]:
1596 if r == next_rev[0]:
1588 # If the last rev we looked at was the one just previous,
1597 # If the last rev we looked at was the one just previous,
1589 # we only need to see a diff.
1598 # we only need to see a diff.
1590 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1599 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1591 # For each line in the delta
1600 # For each line in the delta
1592 for dline in delta.splitlines():
1601 for dline in delta.splitlines():
1593 # get the filename and filenode for that line
1602 # get the filename and filenode for that line
1594 f, fnode = dline.split('\0')
1603 f, fnode = dline.split('\0')
1595 fnode = bin(fnode[:40])
1604 fnode = bin(fnode[:40])
1596 f = changedfiles.get(f, None)
1605 f = changedfiles.get(f, None)
1597 # And if the file is in the list of files we care
1606 # And if the file is in the list of files we care
1598 # about.
1607 # about.
1599 if f is not None:
1608 if f is not None:
1600 # Get the changenode this manifest belongs to
1609 # Get the changenode this manifest belongs to
1601 clnode = msng_mnfst_set[mnfstnode]
1610 clnode = msng_mnfst_set[mnfstnode]
1602 # Create the set of filenodes for the file if
1611 # Create the set of filenodes for the file if
1603 # there isn't one already.
1612 # there isn't one already.
1604 ndset = msng_filenode_set.setdefault(f, {})
1613 ndset = msng_filenode_set.setdefault(f, {})
1605 # And set the filenode's changelog node to the
1614 # And set the filenode's changelog node to the
1606 # manifest's if it hasn't been set already.
1615 # manifest's if it hasn't been set already.
1607 ndset.setdefault(fnode, clnode)
1616 ndset.setdefault(fnode, clnode)
1608 else:
1617 else:
1609 # Otherwise we need a full manifest.
1618 # Otherwise we need a full manifest.
1610 m = mnfst.read(mnfstnode)
1619 m = mnfst.read(mnfstnode)
1611 # For every file in we care about.
1620 # For every file in we care about.
1612 for f in changedfiles:
1621 for f in changedfiles:
1613 fnode = m.get(f, None)
1622 fnode = m.get(f, None)
1614 # If it's in the manifest
1623 # If it's in the manifest
1615 if fnode is not None:
1624 if fnode is not None:
1616 # See comments above.
1625 # See comments above.
1617 clnode = msng_mnfst_set[mnfstnode]
1626 clnode = msng_mnfst_set[mnfstnode]
1618 ndset = msng_filenode_set.setdefault(f, {})
1627 ndset = msng_filenode_set.setdefault(f, {})
1619 ndset.setdefault(fnode, clnode)
1628 ndset.setdefault(fnode, clnode)
1620 # Remember the revision we hope to see next.
1629 # Remember the revision we hope to see next.
1621 next_rev[0] = r + 1
1630 next_rev[0] = r + 1
1622 return collect_msng_filenodes
1631 return collect_msng_filenodes
1623
1632
1624 # We have a list of filenodes we think we need for a file, lets remove
1633 # We have a list of filenodes we think we need for a file, lets remove
1625 # all those we now the recipient must have.
1634 # all those we now the recipient must have.
1626 def prune_filenodes(f, filerevlog):
1635 def prune_filenodes(f, filerevlog):
1627 msngset = msng_filenode_set[f]
1636 msngset = msng_filenode_set[f]
1628 hasset = {}
1637 hasset = {}
1629 # If a 'missing' filenode thinks it belongs to a changenode we
1638 # If a 'missing' filenode thinks it belongs to a changenode we
1630 # assume the recipient must have, then the recipient must have
1639 # assume the recipient must have, then the recipient must have
1631 # that filenode.
1640 # that filenode.
1632 for n in msngset:
1641 for n in msngset:
1633 clnode = cl.node(filerevlog.linkrev(n))
1642 clnode = cl.node(filerevlog.linkrev(n))
1634 if clnode in has_cl_set:
1643 if clnode in has_cl_set:
1635 hasset[n] = 1
1644 hasset[n] = 1
1636 prune_parents(filerevlog, hasset, msngset)
1645 prune_parents(filerevlog, hasset, msngset)
1637
1646
1638 # A function generator function that sets up the a context for the
1647 # A function generator function that sets up the a context for the
1639 # inner function.
1648 # inner function.
1640 def lookup_filenode_link_func(fname):
1649 def lookup_filenode_link_func(fname):
1641 msngset = msng_filenode_set[fname]
1650 msngset = msng_filenode_set[fname]
1642 # Lookup the changenode the filenode belongs to.
1651 # Lookup the changenode the filenode belongs to.
1643 def lookup_filenode_link(fnode):
1652 def lookup_filenode_link(fnode):
1644 return msngset[fnode]
1653 return msngset[fnode]
1645 return lookup_filenode_link
1654 return lookup_filenode_link
1646
1655
1647 # Now that we have all theses utility functions to help out and
1656 # Now that we have all theses utility functions to help out and
1648 # logically divide up the task, generate the group.
1657 # logically divide up the task, generate the group.
1649 def gengroup():
1658 def gengroup():
1650 # The set of changed files starts empty.
1659 # The set of changed files starts empty.
1651 changedfiles = {}
1660 changedfiles = {}
1652 # Create a changenode group generator that will call our functions
1661 # Create a changenode group generator that will call our functions
1653 # back to lookup the owning changenode and collect information.
1662 # back to lookup the owning changenode and collect information.
1654 group = cl.group(msng_cl_lst, identity,
1663 group = cl.group(msng_cl_lst, identity,
1655 manifest_and_file_collector(changedfiles))
1664 manifest_and_file_collector(changedfiles))
1656 for chnk in group:
1665 for chnk in group:
1657 yield chnk
1666 yield chnk
1658
1667
1659 # The list of manifests has been collected by the generator
1668 # The list of manifests has been collected by the generator
1660 # calling our functions back.
1669 # calling our functions back.
1661 prune_manifests()
1670 prune_manifests()
1662 msng_mnfst_lst = msng_mnfst_set.keys()
1671 msng_mnfst_lst = msng_mnfst_set.keys()
1663 # Sort the manifestnodes by revision number.
1672 # Sort the manifestnodes by revision number.
1664 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1673 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1665 # Create a generator for the manifestnodes that calls our lookup
1674 # Create a generator for the manifestnodes that calls our lookup
1666 # and data collection functions back.
1675 # and data collection functions back.
1667 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1676 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1668 filenode_collector(changedfiles))
1677 filenode_collector(changedfiles))
1669 for chnk in group:
1678 for chnk in group:
1670 yield chnk
1679 yield chnk
1671
1680
1672 # These are no longer needed, dereference and toss the memory for
1681 # These are no longer needed, dereference and toss the memory for
1673 # them.
1682 # them.
1674 msng_mnfst_lst = None
1683 msng_mnfst_lst = None
1675 msng_mnfst_set.clear()
1684 msng_mnfst_set.clear()
1676
1685
1677 changedfiles = changedfiles.keys()
1686 changedfiles = changedfiles.keys()
1678 changedfiles.sort()
1687 changedfiles.sort()
1679 # Go through all our files in order sorted by name.
1688 # Go through all our files in order sorted by name.
1680 for fname in changedfiles:
1689 for fname in changedfiles:
1681 filerevlog = self.file(fname)
1690 filerevlog = self.file(fname)
1682 # Toss out the filenodes that the recipient isn't really
1691 # Toss out the filenodes that the recipient isn't really
1683 # missing.
1692 # missing.
1684 if msng_filenode_set.has_key(fname):
1693 if msng_filenode_set.has_key(fname):
1685 prune_filenodes(fname, filerevlog)
1694 prune_filenodes(fname, filerevlog)
1686 msng_filenode_lst = msng_filenode_set[fname].keys()
1695 msng_filenode_lst = msng_filenode_set[fname].keys()
1687 else:
1696 else:
1688 msng_filenode_lst = []
1697 msng_filenode_lst = []
1689 # If any filenodes are left, generate the group for them,
1698 # If any filenodes are left, generate the group for them,
1690 # otherwise don't bother.
1699 # otherwise don't bother.
1691 if len(msng_filenode_lst) > 0:
1700 if len(msng_filenode_lst) > 0:
1692 yield changegroup.genchunk(fname)
1701 yield changegroup.genchunk(fname)
1693 # Sort the filenodes by their revision #
1702 # Sort the filenodes by their revision #
1694 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1703 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1695 # Create a group generator and only pass in a changenode
1704 # Create a group generator and only pass in a changenode
1696 # lookup function as we need to collect no information
1705 # lookup function as we need to collect no information
1697 # from filenodes.
1706 # from filenodes.
1698 group = filerevlog.group(msng_filenode_lst,
1707 group = filerevlog.group(msng_filenode_lst,
1699 lookup_filenode_link_func(fname))
1708 lookup_filenode_link_func(fname))
1700 for chnk in group:
1709 for chnk in group:
1701 yield chnk
1710 yield chnk
1702 if msng_filenode_set.has_key(fname):
1711 if msng_filenode_set.has_key(fname):
1703 # Don't need this anymore, toss it to free memory.
1712 # Don't need this anymore, toss it to free memory.
1704 del msng_filenode_set[fname]
1713 del msng_filenode_set[fname]
1705 # Signal that no more groups are left.
1714 # Signal that no more groups are left.
1706 yield changegroup.closechunk()
1715 yield changegroup.closechunk()
1707
1716
1708 if msng_cl_lst:
1717 if msng_cl_lst:
1709 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1718 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1710
1719
1711 return util.chunkbuffer(gengroup())
1720 return util.chunkbuffer(gengroup())
1712
1721
1713 def changegroup(self, basenodes, source):
1722 def changegroup(self, basenodes, source):
1714 """Generate a changegroup of all nodes that we have that a recipient
1723 """Generate a changegroup of all nodes that we have that a recipient
1715 doesn't.
1724 doesn't.
1716
1725
1717 This is much easier than the previous function as we can assume that
1726 This is much easier than the previous function as we can assume that
1718 the recipient has any changenode we aren't sending them."""
1727 the recipient has any changenode we aren't sending them."""
1719
1728
1720 self.hook('preoutgoing', throw=True, source=source)
1729 self.hook('preoutgoing', throw=True, source=source)
1721
1730
1722 cl = self.changelog
1731 cl = self.changelog
1723 nodes = cl.nodesbetween(basenodes, None)[0]
1732 nodes = cl.nodesbetween(basenodes, None)[0]
1724 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1733 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1725 self.changegroupinfo(nodes)
1734 self.changegroupinfo(nodes)
1726
1735
1727 def identity(x):
1736 def identity(x):
1728 return x
1737 return x
1729
1738
1730 def gennodelst(revlog):
1739 def gennodelst(revlog):
1731 for r in xrange(0, revlog.count()):
1740 for r in xrange(0, revlog.count()):
1732 n = revlog.node(r)
1741 n = revlog.node(r)
1733 if revlog.linkrev(n) in revset:
1742 if revlog.linkrev(n) in revset:
1734 yield n
1743 yield n
1735
1744
1736 def changed_file_collector(changedfileset):
1745 def changed_file_collector(changedfileset):
1737 def collect_changed_files(clnode):
1746 def collect_changed_files(clnode):
1738 c = cl.read(clnode)
1747 c = cl.read(clnode)
1739 for fname in c[3]:
1748 for fname in c[3]:
1740 changedfileset[fname] = 1
1749 changedfileset[fname] = 1
1741 return collect_changed_files
1750 return collect_changed_files
1742
1751
1743 def lookuprevlink_func(revlog):
1752 def lookuprevlink_func(revlog):
1744 def lookuprevlink(n):
1753 def lookuprevlink(n):
1745 return cl.node(revlog.linkrev(n))
1754 return cl.node(revlog.linkrev(n))
1746 return lookuprevlink
1755 return lookuprevlink
1747
1756
1748 def gengroup():
1757 def gengroup():
1749 # construct a list of all changed files
1758 # construct a list of all changed files
1750 changedfiles = {}
1759 changedfiles = {}
1751
1760
1752 for chnk in cl.group(nodes, identity,
1761 for chnk in cl.group(nodes, identity,
1753 changed_file_collector(changedfiles)):
1762 changed_file_collector(changedfiles)):
1754 yield chnk
1763 yield chnk
1755 changedfiles = changedfiles.keys()
1764 changedfiles = changedfiles.keys()
1756 changedfiles.sort()
1765 changedfiles.sort()
1757
1766
1758 mnfst = self.manifest
1767 mnfst = self.manifest
1759 nodeiter = gennodelst(mnfst)
1768 nodeiter = gennodelst(mnfst)
1760 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1769 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1761 yield chnk
1770 yield chnk
1762
1771
1763 for fname in changedfiles:
1772 for fname in changedfiles:
1764 filerevlog = self.file(fname)
1773 filerevlog = self.file(fname)
1765 nodeiter = gennodelst(filerevlog)
1774 nodeiter = gennodelst(filerevlog)
1766 nodeiter = list(nodeiter)
1775 nodeiter = list(nodeiter)
1767 if nodeiter:
1776 if nodeiter:
1768 yield changegroup.genchunk(fname)
1777 yield changegroup.genchunk(fname)
1769 lookup = lookuprevlink_func(filerevlog)
1778 lookup = lookuprevlink_func(filerevlog)
1770 for chnk in filerevlog.group(nodeiter, lookup):
1779 for chnk in filerevlog.group(nodeiter, lookup):
1771 yield chnk
1780 yield chnk
1772
1781
1773 yield changegroup.closechunk()
1782 yield changegroup.closechunk()
1774
1783
1775 if nodes:
1784 if nodes:
1776 self.hook('outgoing', node=hex(nodes[0]), source=source)
1785 self.hook('outgoing', node=hex(nodes[0]), source=source)
1777
1786
1778 return util.chunkbuffer(gengroup())
1787 return util.chunkbuffer(gengroup())
1779
1788
1780 def addchangegroup(self, source, srctype, url):
1789 def addchangegroup(self, source, srctype, url):
1781 """add changegroup to repo.
1790 """add changegroup to repo.
1782
1791
1783 return values:
1792 return values:
1784 - nothing changed or no source: 0
1793 - nothing changed or no source: 0
1785 - more heads than before: 1+added heads (2..n)
1794 - more heads than before: 1+added heads (2..n)
1786 - less heads than before: -1-removed heads (-2..-n)
1795 - less heads than before: -1-removed heads (-2..-n)
1787 - number of heads stays the same: 1
1796 - number of heads stays the same: 1
1788 """
1797 """
1789 def csmap(x):
1798 def csmap(x):
1790 self.ui.debug(_("add changeset %s\n") % short(x))
1799 self.ui.debug(_("add changeset %s\n") % short(x))
1791 return cl.count()
1800 return cl.count()
1792
1801
1793 def revmap(x):
1802 def revmap(x):
1794 return cl.rev(x)
1803 return cl.rev(x)
1795
1804
1796 if not source:
1805 if not source:
1797 return 0
1806 return 0
1798
1807
1799 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1808 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1800
1809
1801 changesets = files = revisions = 0
1810 changesets = files = revisions = 0
1802
1811
1803 tr = self.transaction()
1812 tr = self.transaction()
1804
1813
1805 # write changelog data to temp files so concurrent readers will not see
1814 # write changelog data to temp files so concurrent readers will not see
1806 # inconsistent view
1815 # inconsistent view
1807 cl = self.changelog
1816 cl = self.changelog
1808 cl.delayupdate()
1817 cl.delayupdate()
1809 oldheads = len(cl.heads())
1818 oldheads = len(cl.heads())
1810
1819
1811 # pull off the changeset group
1820 # pull off the changeset group
1812 self.ui.status(_("adding changesets\n"))
1821 self.ui.status(_("adding changesets\n"))
1813 cor = cl.count() - 1
1822 cor = cl.count() - 1
1814 chunkiter = changegroup.chunkiter(source)
1823 chunkiter = changegroup.chunkiter(source)
1815 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1824 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1816 raise util.Abort(_("received changelog group is empty"))
1825 raise util.Abort(_("received changelog group is empty"))
1817 cnr = cl.count() - 1
1826 cnr = cl.count() - 1
1818 changesets = cnr - cor
1827 changesets = cnr - cor
1819
1828
1820 # pull off the manifest group
1829 # pull off the manifest group
1821 self.ui.status(_("adding manifests\n"))
1830 self.ui.status(_("adding manifests\n"))
1822 chunkiter = changegroup.chunkiter(source)
1831 chunkiter = changegroup.chunkiter(source)
1823 # no need to check for empty manifest group here:
1832 # no need to check for empty manifest group here:
1824 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1833 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1825 # no new manifest will be created and the manifest group will
1834 # no new manifest will be created and the manifest group will
1826 # be empty during the pull
1835 # be empty during the pull
1827 self.manifest.addgroup(chunkiter, revmap, tr)
1836 self.manifest.addgroup(chunkiter, revmap, tr)
1828
1837
1829 # process the files
1838 # process the files
1830 self.ui.status(_("adding file changes\n"))
1839 self.ui.status(_("adding file changes\n"))
1831 while 1:
1840 while 1:
1832 f = changegroup.getchunk(source)
1841 f = changegroup.getchunk(source)
1833 if not f:
1842 if not f:
1834 break
1843 break
1835 self.ui.debug(_("adding %s revisions\n") % f)
1844 self.ui.debug(_("adding %s revisions\n") % f)
1836 fl = self.file(f)
1845 fl = self.file(f)
1837 o = fl.count()
1846 o = fl.count()
1838 chunkiter = changegroup.chunkiter(source)
1847 chunkiter = changegroup.chunkiter(source)
1839 if fl.addgroup(chunkiter, revmap, tr) is None:
1848 if fl.addgroup(chunkiter, revmap, tr) is None:
1840 raise util.Abort(_("received file revlog group is empty"))
1849 raise util.Abort(_("received file revlog group is empty"))
1841 revisions += fl.count() - o
1850 revisions += fl.count() - o
1842 files += 1
1851 files += 1
1843
1852
1844 # make changelog see real files again
1853 # make changelog see real files again
1845 cl.finalize(tr)
1854 cl.finalize(tr)
1846
1855
1847 newheads = len(self.changelog.heads())
1856 newheads = len(self.changelog.heads())
1848 heads = ""
1857 heads = ""
1849 if oldheads and newheads != oldheads:
1858 if oldheads and newheads != oldheads:
1850 heads = _(" (%+d heads)") % (newheads - oldheads)
1859 heads = _(" (%+d heads)") % (newheads - oldheads)
1851
1860
1852 self.ui.status(_("added %d changesets"
1861 self.ui.status(_("added %d changesets"
1853 " with %d changes to %d files%s\n")
1862 " with %d changes to %d files%s\n")
1854 % (changesets, revisions, files, heads))
1863 % (changesets, revisions, files, heads))
1855
1864
1856 if changesets > 0:
1865 if changesets > 0:
1857 self.hook('pretxnchangegroup', throw=True,
1866 self.hook('pretxnchangegroup', throw=True,
1858 node=hex(self.changelog.node(cor+1)), source=srctype,
1867 node=hex(self.changelog.node(cor+1)), source=srctype,
1859 url=url)
1868 url=url)
1860
1869
1861 tr.close()
1870 tr.close()
1862
1871
1863 if changesets > 0:
1872 if changesets > 0:
1864 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1873 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1865 source=srctype, url=url)
1874 source=srctype, url=url)
1866
1875
1867 for i in xrange(cor + 1, cnr + 1):
1876 for i in xrange(cor + 1, cnr + 1):
1868 self.hook("incoming", node=hex(self.changelog.node(i)),
1877 self.hook("incoming", node=hex(self.changelog.node(i)),
1869 source=srctype, url=url)
1878 source=srctype, url=url)
1870
1879
1871 # never return 0 here:
1880 # never return 0 here:
1872 if newheads < oldheads:
1881 if newheads < oldheads:
1873 return newheads - oldheads - 1
1882 return newheads - oldheads - 1
1874 else:
1883 else:
1875 return newheads - oldheads + 1
1884 return newheads - oldheads + 1
1876
1885
1877
1886
1878 def stream_in(self, remote):
1887 def stream_in(self, remote):
1879 fp = remote.stream_out()
1888 fp = remote.stream_out()
1880 l = fp.readline()
1889 l = fp.readline()
1881 try:
1890 try:
1882 resp = int(l)
1891 resp = int(l)
1883 except ValueError:
1892 except ValueError:
1884 raise util.UnexpectedOutput(
1893 raise util.UnexpectedOutput(
1885 _('Unexpected response from remote server:'), l)
1894 _('Unexpected response from remote server:'), l)
1886 if resp == 1:
1895 if resp == 1:
1887 raise util.Abort(_('operation forbidden by server'))
1896 raise util.Abort(_('operation forbidden by server'))
1888 elif resp == 2:
1897 elif resp == 2:
1889 raise util.Abort(_('locking the remote repository failed'))
1898 raise util.Abort(_('locking the remote repository failed'))
1890 elif resp != 0:
1899 elif resp != 0:
1891 raise util.Abort(_('the server sent an unknown error code'))
1900 raise util.Abort(_('the server sent an unknown error code'))
1892 self.ui.status(_('streaming all changes\n'))
1901 self.ui.status(_('streaming all changes\n'))
1893 l = fp.readline()
1902 l = fp.readline()
1894 try:
1903 try:
1895 total_files, total_bytes = map(int, l.split(' ', 1))
1904 total_files, total_bytes = map(int, l.split(' ', 1))
1896 except ValueError, TypeError:
1905 except ValueError, TypeError:
1897 raise util.UnexpectedOutput(
1906 raise util.UnexpectedOutput(
1898 _('Unexpected response from remote server:'), l)
1907 _('Unexpected response from remote server:'), l)
1899 self.ui.status(_('%d files to transfer, %s of data\n') %
1908 self.ui.status(_('%d files to transfer, %s of data\n') %
1900 (total_files, util.bytecount(total_bytes)))
1909 (total_files, util.bytecount(total_bytes)))
1901 start = time.time()
1910 start = time.time()
1902 for i in xrange(total_files):
1911 for i in xrange(total_files):
1903 # XXX doesn't support '\n' or '\r' in filenames
1912 # XXX doesn't support '\n' or '\r' in filenames
1904 l = fp.readline()
1913 l = fp.readline()
1905 try:
1914 try:
1906 name, size = l.split('\0', 1)
1915 name, size = l.split('\0', 1)
1907 size = int(size)
1916 size = int(size)
1908 except ValueError, TypeError:
1917 except ValueError, TypeError:
1909 raise util.UnexpectedOutput(
1918 raise util.UnexpectedOutput(
1910 _('Unexpected response from remote server:'), l)
1919 _('Unexpected response from remote server:'), l)
1911 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1920 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1912 ofp = self.sopener(name, 'w')
1921 ofp = self.sopener(name, 'w')
1913 for chunk in util.filechunkiter(fp, limit=size):
1922 for chunk in util.filechunkiter(fp, limit=size):
1914 ofp.write(chunk)
1923 ofp.write(chunk)
1915 ofp.close()
1924 ofp.close()
1916 elapsed = time.time() - start
1925 elapsed = time.time() - start
1917 if elapsed <= 0:
1926 if elapsed <= 0:
1918 elapsed = 0.001
1927 elapsed = 0.001
1919 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1928 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1920 (util.bytecount(total_bytes), elapsed,
1929 (util.bytecount(total_bytes), elapsed,
1921 util.bytecount(total_bytes / elapsed)))
1930 util.bytecount(total_bytes / elapsed)))
1922 self.reload()
1931 self.reload()
1923 return len(self.heads()) + 1
1932 return len(self.heads()) + 1
1924
1933
1925 def clone(self, remote, heads=[], stream=False):
1934 def clone(self, remote, heads=[], stream=False):
1926 '''clone remote repository.
1935 '''clone remote repository.
1927
1936
1928 keyword arguments:
1937 keyword arguments:
1929 heads: list of revs to clone (forces use of pull)
1938 heads: list of revs to clone (forces use of pull)
1930 stream: use streaming clone if possible'''
1939 stream: use streaming clone if possible'''
1931
1940
1932 # now, all clients that can request uncompressed clones can
1941 # now, all clients that can request uncompressed clones can
1933 # read repo formats supported by all servers that can serve
1942 # read repo formats supported by all servers that can serve
1934 # them.
1943 # them.
1935
1944
1936 # if revlog format changes, client will have to check version
1945 # if revlog format changes, client will have to check version
1937 # and format flags on "stream" capability, and use
1946 # and format flags on "stream" capability, and use
1938 # uncompressed only if compatible.
1947 # uncompressed only if compatible.
1939
1948
1940 if stream and not heads and remote.capable('stream'):
1949 if stream and not heads and remote.capable('stream'):
1941 return self.stream_in(remote)
1950 return self.stream_in(remote)
1942 return self.pull(remote, heads)
1951 return self.pull(remote, heads)
1943
1952
1944 # used to avoid circular references so destructors work
1953 # used to avoid circular references so destructors work
1945 def aftertrans(files):
1954 def aftertrans(files):
1946 renamefiles = [tuple(t) for t in files]
1955 renamefiles = [tuple(t) for t in files]
1947 def a():
1956 def a():
1948 for src, dest in renamefiles:
1957 for src, dest in renamefiles:
1949 util.rename(src, dest)
1958 util.rename(src, dest)
1950 return a
1959 return a
1951
1960
1952 def instance(ui, path, create):
1961 def instance(ui, path, create):
1953 return localrepository(ui, util.drop_scheme('file', path), create)
1962 return localrepository(ui, util.drop_scheme('file', path), create)
1954
1963
1955 def islocal(path):
1964 def islocal(path):
1956 return True
1965 return True
@@ -1,48 +1,50
1 #!/bin/sh -e
1 #!/bin/sh -e
2
2
3 umask 027
3 umask 027
4 mkdir test1
4 mkdir test1
5 cd test1
5 cd test1
6
6
7 hg init
7 hg init
8 touch a b
8 touch a b
9 hg add a b
9 hg add a b
10 hg ci -m "added a b" -d "1000000 0"
10 hg ci -m "added a b" -d "1000000 0"
11
11
12 cd ..
12 cd ..
13 hg clone test1 test3
13 hg clone test1 test3
14 mkdir test2
14 mkdir test2
15 cd test2
15 cd test2
16
16
17 hg init
17 hg init
18 hg pull ../test1
18 hg pull ../test1
19 hg co
19 hg co
20 chmod +x a
20 chmod +x a
21 hg ci -m "chmod +x a" -d "1000000 0"
21 hg ci -m "chmod +x a" -d "1000000 0"
22 echo % the changelog should mention file a:
23 hg tip --template '#files#\n'
22
24
23 cd ../test1
25 cd ../test1
24 echo 123 >>a
26 echo 123 >>a
25 hg ci -m "a updated" -d "1000000 0"
27 hg ci -m "a updated" -d "1000000 0"
26
28
27 hg pull ../test2
29 hg pull ../test2
28 hg heads
30 hg heads
29 hg history
31 hg history
30
32
31 hg -v merge
33 hg -v merge
32
34
33 cd ../test3
35 cd ../test3
34 echo 123 >>b
36 echo 123 >>b
35 hg ci -m "b updated" -d "1000000 0"
37 hg ci -m "b updated" -d "1000000 0"
36
38
37 hg pull ../test2
39 hg pull ../test2
38 hg heads
40 hg heads
39 hg history
41 hg history
40
42
41 hg -v merge
43 hg -v merge
42
44
43 ls -l ../test[123]/a > foo
45 ls -l ../test[123]/a > foo
44 cut -b 1-10 < foo
46 cut -b 1-10 < foo
45
47
46 hg debugindex .hg/store/data/a.i
48 hg debugindex .hg/store/data/a.i
47 hg debugindex ../test2/.hg/store/data/a.i
49 hg debugindex ../test2/.hg/store/data/a.i
48 hg debugindex ../test1/.hg/store/data/a.i
50 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,102 +1,104
1 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 pulling from ../test1
2 pulling from ../test1
3 requesting all changes
3 requesting all changes
4 adding changesets
4 adding changesets
5 adding manifests
5 adding manifests
6 adding file changes
6 adding file changes
7 added 1 changesets with 2 changes to 2 files
7 added 1 changesets with 2 changes to 2 files
8 (run 'hg update' to get a working copy)
8 (run 'hg update' to get a working copy)
9 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
9 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 % the changelog should mention file a:
11 a
10 pulling from ../test2
12 pulling from ../test2
11 searching for changes
13 searching for changes
12 adding changesets
14 adding changesets
13 adding manifests
15 adding manifests
14 adding file changes
16 adding file changes
15 added 1 changesets with 1 changes to 1 files (+1 heads)
17 added 1 changesets with 1 changes to 1 files (+1 heads)
16 (run 'hg heads' to see heads, 'hg merge' to merge)
18 (run 'hg heads' to see heads, 'hg merge' to merge)
17 changeset: 2:b833d578451e
19 changeset: 2:b833d578451e
18 tag: tip
20 tag: tip
19 parent: 0:4536b1c2ca69
21 parent: 0:4536b1c2ca69
20 user: test
22 user: test
21 date: Mon Jan 12 13:46:40 1970 +0000
23 date: Mon Jan 12 13:46:40 1970 +0000
22 summary: chmod +x a
24 summary: chmod +x a
23
25
24 changeset: 1:a187cb361a5a
26 changeset: 1:a187cb361a5a
25 user: test
27 user: test
26 date: Mon Jan 12 13:46:40 1970 +0000
28 date: Mon Jan 12 13:46:40 1970 +0000
27 summary: a updated
29 summary: a updated
28
30
29 changeset: 2:b833d578451e
31 changeset: 2:b833d578451e
30 tag: tip
32 tag: tip
31 parent: 0:4536b1c2ca69
33 parent: 0:4536b1c2ca69
32 user: test
34 user: test
33 date: Mon Jan 12 13:46:40 1970 +0000
35 date: Mon Jan 12 13:46:40 1970 +0000
34 summary: chmod +x a
36 summary: chmod +x a
35
37
36 changeset: 1:a187cb361a5a
38 changeset: 1:a187cb361a5a
37 user: test
39 user: test
38 date: Mon Jan 12 13:46:40 1970 +0000
40 date: Mon Jan 12 13:46:40 1970 +0000
39 summary: a updated
41 summary: a updated
40
42
41 changeset: 0:4536b1c2ca69
43 changeset: 0:4536b1c2ca69
42 user: test
44 user: test
43 date: Mon Jan 12 13:46:40 1970 +0000
45 date: Mon Jan 12 13:46:40 1970 +0000
44 summary: added a b
46 summary: added a b
45
47
46 resolving manifests
48 resolving manifests
47 merging a
49 merging a
48 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
50 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
49 (branch merge, don't forget to commit)
51 (branch merge, don't forget to commit)
50 pulling from ../test2
52 pulling from ../test2
51 searching for changes
53 searching for changes
52 adding changesets
54 adding changesets
53 adding manifests
55 adding manifests
54 adding file changes
56 adding file changes
55 added 1 changesets with 1 changes to 1 files (+1 heads)
57 added 1 changesets with 1 changes to 1 files (+1 heads)
56 (run 'hg heads' to see heads, 'hg merge' to merge)
58 (run 'hg heads' to see heads, 'hg merge' to merge)
57 changeset: 2:b833d578451e
59 changeset: 2:b833d578451e
58 tag: tip
60 tag: tip
59 parent: 0:4536b1c2ca69
61 parent: 0:4536b1c2ca69
60 user: test
62 user: test
61 date: Mon Jan 12 13:46:40 1970 +0000
63 date: Mon Jan 12 13:46:40 1970 +0000
62 summary: chmod +x a
64 summary: chmod +x a
63
65
64 changeset: 1:d54568174d8e
66 changeset: 1:d54568174d8e
65 user: test
67 user: test
66 date: Mon Jan 12 13:46:40 1970 +0000
68 date: Mon Jan 12 13:46:40 1970 +0000
67 summary: b updated
69 summary: b updated
68
70
69 changeset: 2:b833d578451e
71 changeset: 2:b833d578451e
70 tag: tip
72 tag: tip
71 parent: 0:4536b1c2ca69
73 parent: 0:4536b1c2ca69
72 user: test
74 user: test
73 date: Mon Jan 12 13:46:40 1970 +0000
75 date: Mon Jan 12 13:46:40 1970 +0000
74 summary: chmod +x a
76 summary: chmod +x a
75
77
76 changeset: 1:d54568174d8e
78 changeset: 1:d54568174d8e
77 user: test
79 user: test
78 date: Mon Jan 12 13:46:40 1970 +0000
80 date: Mon Jan 12 13:46:40 1970 +0000
79 summary: b updated
81 summary: b updated
80
82
81 changeset: 0:4536b1c2ca69
83 changeset: 0:4536b1c2ca69
82 user: test
84 user: test
83 date: Mon Jan 12 13:46:40 1970 +0000
85 date: Mon Jan 12 13:46:40 1970 +0000
84 summary: added a b
86 summary: added a b
85
87
86 resolving manifests
88 resolving manifests
87 getting a
89 getting a
88 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 (branch merge, don't forget to commit)
91 (branch merge, don't forget to commit)
90 -rwxr-x---
92 -rwxr-x---
91 -rwxr-x---
93 -rwxr-x---
92 -rwxr-x---
94 -rwxr-x---
93 rev offset length base linkrev nodeid p1 p2
95 rev offset length base linkrev nodeid p1 p2
94 0 0 0 0 0 b80de5d13875 000000000000 000000000000
96 0 0 0 0 0 b80de5d13875 000000000000 000000000000
95 1 0 0 0 2 37c42bd6cc03 b80de5d13875 000000000000
97 1 0 0 0 2 37c42bd6cc03 b80de5d13875 000000000000
96 rev offset length base linkrev nodeid p1 p2
98 rev offset length base linkrev nodeid p1 p2
97 0 0 0 0 0 b80de5d13875 000000000000 000000000000
99 0 0 0 0 0 b80de5d13875 000000000000 000000000000
98 1 0 0 0 1 37c42bd6cc03 b80de5d13875 000000000000
100 1 0 0 0 1 37c42bd6cc03 b80de5d13875 000000000000
99 rev offset length base linkrev nodeid p1 p2
101 rev offset length base linkrev nodeid p1 p2
100 0 0 0 0 0 b80de5d13875 000000000000 000000000000
102 0 0 0 0 0 b80de5d13875 000000000000 000000000000
101 1 0 5 1 1 7fe919cc0336 b80de5d13875 000000000000
103 1 0 5 1 1 7fe919cc0336 b80de5d13875 000000000000
102 2 5 0 2 2 37c42bd6cc03 b80de5d13875 000000000000
104 2 5 0 2 2 37c42bd6cc03 b80de5d13875 000000000000
General Comments 0
You need to be logged in to leave comments. Login now