Export extra in _tag so convert can set the branch of a tag
Brendan Cully
r4864:fc389dcc default
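
Why the new parameter matters: `extra` flows straight through `_tag` into `commit()`, and because a caller that passes `parent` takes the non-dirstate path in `commit()` (where `branchname` stays empty), a `branch` entry supplied in `extra` is recorded on the tag changeset instead of the working directory's branch. A minimal sketch of the intended use, with hypothetical values for the repository, nodes, user, and branch name (an illustration, not the actual convert extension code):

    # Hypothetical caller in the style of the convert extension (Python 2).
    # 'repo' is an open localrepository; 'node' is the changeset being
    # tagged; 'parent' is the changeset the tag commit should descend from.
    tagnode = repo._tag('v1.0',                     # tag name
                        node,                       # changeset being tagged
                        'Added tag v1.0',           # commit message
                        False,                      # local=False: versioned tag
                        'convert-repo',             # user
                        None,                       # date (None: use current)
                        parent=parent,
                        extra={'branch': 'stable'}) # tag commit lands on the
                                                    # 'stable' named branch

Without this plumbing, a tag changeset re-created during conversion would always land on the working directory's branch rather than the branch the original tag commit lived on.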
@@ -1,1929 +1,1931 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.path = path
23 self.path = path
24 self.root = os.path.realpath(path)
24 self.root = os.path.realpath(path)
25 self.path = os.path.join(self.root, ".hg")
25 self.path = os.path.join(self.root, ".hg")
26 self.origroot = path
26 self.origroot = path
27 self.opener = util.opener(self.path)
27 self.opener = util.opener(self.path)
28 self.wopener = util.opener(self.root)
28 self.wopener = util.opener(self.root)
29
29
30 if not os.path.isdir(self.path):
30 if not os.path.isdir(self.path):
31 if create:
31 if create:
32 if not os.path.exists(path):
32 if not os.path.exists(path):
33 os.mkdir(path)
33 os.mkdir(path)
34 os.mkdir(self.path)
34 os.mkdir(self.path)
35 requirements = ["revlogv1"]
35 requirements = ["revlogv1"]
36 if parentui.configbool('format', 'usestore', True):
36 if parentui.configbool('format', 'usestore', True):
37 os.mkdir(os.path.join(self.path, "store"))
37 os.mkdir(os.path.join(self.path, "store"))
38 requirements.append("store")
38 requirements.append("store")
39 # create an invalid changelog
39 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
40 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
41 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
42 ' dummy changelog to prevent using the old repo layout'
43 )
43 )
44 reqfile = self.opener("requires", "w")
44 reqfile = self.opener("requires", "w")
45 for r in requirements:
45 for r in requirements:
46 reqfile.write("%s\n" % r)
46 reqfile.write("%s\n" % r)
47 reqfile.close()
47 reqfile.close()
48 else:
48 else:
49 raise repo.RepoError(_("repository %s not found") % path)
49 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
50 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
51 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
52 else:
53 # find requirements
53 # find requirements
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 requirements = []
59 requirements = []
60 # check them
60 # check them
61 for r in requirements:
61 for r in requirements:
62 if r not in self.supported:
62 if r not in self.supported:
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64
64
65 # setup store
65 # setup store
66 if "store" in requirements:
66 if "store" in requirements:
67 self.encodefn = util.encodefilename
67 self.encodefn = util.encodefilename
68 self.decodefn = util.decodefilename
68 self.decodefn = util.decodefilename
69 self.spath = os.path.join(self.path, "store")
69 self.spath = os.path.join(self.path, "store")
70 else:
70 else:
71 self.encodefn = lambda x: x
71 self.encodefn = lambda x: x
72 self.decodefn = lambda x: x
72 self.decodefn = lambda x: x
73 self.spath = self.path
73 self.spath = self.path
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75
75
76 self.ui = ui.ui(parentui=parentui)
76 self.ui = ui.ui(parentui=parentui)
77 try:
77 try:
78 self.ui.readconfig(self.join("hgrc"), self.root)
78 self.ui.readconfig(self.join("hgrc"), self.root)
79 extensions.loadall(self.ui)
79 extensions.loadall(self.ui)
80 except IOError:
80 except IOError:
81 pass
81 pass
82
82
83 self.tagscache = None
83 self.tagscache = None
84 self.branchcache = None
84 self.branchcache = None
85 self.nodetagscache = None
85 self.nodetagscache = None
86 self.filterpats = {}
86 self.filterpats = {}
87 self.transhandle = None
87 self.transhandle = None
88
88
89 def __getattr__(self, name):
89 def __getattr__(self, name):
90 if name == 'changelog':
90 if name == 'changelog':
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 return self.changelog
93 return self.changelog
94 if name == 'manifest':
94 if name == 'manifest':
95 self.changelog
95 self.changelog
96 self.manifest = manifest.manifest(self.sopener)
96 self.manifest = manifest.manifest(self.sopener)
97 return self.manifest
97 return self.manifest
98 if name == 'dirstate':
98 if name == 'dirstate':
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 return self.dirstate
100 return self.dirstate
101 else:
101 else:
102 raise AttributeError, name
102 raise AttributeError, name
103
103
104 def url(self):
104 def url(self):
105 return 'file:' + self.root
105 return 'file:' + self.root
106
106
107 def hook(self, name, throw=False, **args):
107 def hook(self, name, throw=False, **args):
108 return hook.hook(self.ui, self, name, throw, **args)
108 return hook.hook(self.ui, self, name, throw, **args)
109
109
110 tag_disallowed = ':\r\n'
110 tag_disallowed = ':\r\n'
111
111
112 def _tag(self, name, node, message, local, user, date, parent=None):
112 def _tag(self, name, node, message, local, user, date, parent=None,
113 extra={}):
113 use_dirstate = parent is None
114 use_dirstate = parent is None
114
115
115 for c in self.tag_disallowed:
116 for c in self.tag_disallowed:
116 if c in name:
117 if c in name:
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 raise util.Abort(_('%r cannot be used in a tag name') % c)
118
119
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120
121
121 if local:
122 if local:
122 # local tags are stored in the current charset
123 # local tags are stored in the current charset
123 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
124 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
124 self.hook('tag', node=hex(node), tag=name, local=local)
125 self.hook('tag', node=hex(node), tag=name, local=local)
125 return
126 return
126
127
127 # committed tags are stored in UTF-8
128 # committed tags are stored in UTF-8
128 line = '%s %s\n' % (hex(node), util.fromlocal(name))
129 line = '%s %s\n' % (hex(node), util.fromlocal(name))
129 if use_dirstate:
130 if use_dirstate:
130 self.wfile('.hgtags', 'ab').write(line)
131 self.wfile('.hgtags', 'ab').write(line)
131 else:
132 else:
132 ntags = self.filectx('.hgtags', parent).data()
133 ntags = self.filectx('.hgtags', parent).data()
133 self.wfile('.hgtags', 'ab').write(ntags + line)
134 self.wfile('.hgtags', 'ab').write(ntags + line)
134 if use_dirstate and self.dirstate.state('.hgtags') == '?':
135 if use_dirstate and self.dirstate.state('.hgtags') == '?':
135 self.add(['.hgtags'])
136 self.add(['.hgtags'])
136
137
137 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
138 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
139 extra=extra)
138
140
139 self.hook('tag', node=hex(node), tag=name, local=local)
141 self.hook('tag', node=hex(node), tag=name, local=local)
140
142
141 return tagnode
143 return tagnode
142
144
143 def tag(self, name, node, message, local, user, date):
145 def tag(self, name, node, message, local, user, date):
144 '''tag a revision with a symbolic name.
146 '''tag a revision with a symbolic name.
145
147
146 if local is True, the tag is stored in a per-repository file.
148 if local is True, the tag is stored in a per-repository file.
147 otherwise, it is stored in the .hgtags file, and a new
149 otherwise, it is stored in the .hgtags file, and a new
148 changeset is committed with the change.
150 changeset is committed with the change.
149
151
150 keyword arguments:
152 keyword arguments:
151
153
152 local: whether to store tag in non-version-controlled file
154 local: whether to store tag in non-version-controlled file
153 (default False)
155 (default False)
154
156
155 message: commit message to use if committing
157 message: commit message to use if committing
156
158
157 user: name of user to use if committing
159 user: name of user to use if committing
158
160
159 date: date tuple to use if committing'''
161 date: date tuple to use if committing'''
160
162
161 for x in self.status()[:5]:
163 for x in self.status()[:5]:
162 if '.hgtags' in x:
164 if '.hgtags' in x:
163 raise util.Abort(_('working copy of .hgtags is changed '
165 raise util.Abort(_('working copy of .hgtags is changed '
164 '(please commit .hgtags manually)'))
166 '(please commit .hgtags manually)'))
165
167
166
168
167 self._tag(name, node, message, local, user, date)
169 self._tag(name, node, message, local, user, date)
168
170
169 def tags(self):
171 def tags(self):
170 '''return a mapping of tag to node'''
172 '''return a mapping of tag to node'''
171 if self.tagscache:
173 if self.tagscache:
172 return self.tagscache
174 return self.tagscache
173
175
174 globaltags = {}
176 globaltags = {}
175
177
176 def readtags(lines, fn):
178 def readtags(lines, fn):
177 filetags = {}
179 filetags = {}
178 count = 0
180 count = 0
179
181
180 def warn(msg):
182 def warn(msg):
181 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
183 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
182
184
183 for l in lines:
185 for l in lines:
184 count += 1
186 count += 1
185 if not l:
187 if not l:
186 continue
188 continue
187 s = l.split(" ", 1)
189 s = l.split(" ", 1)
188 if len(s) != 2:
190 if len(s) != 2:
189 warn(_("cannot parse entry"))
191 warn(_("cannot parse entry"))
190 continue
192 continue
191 node, key = s
193 node, key = s
192 key = util.tolocal(key.strip()) # stored in UTF-8
194 key = util.tolocal(key.strip()) # stored in UTF-8
193 try:
195 try:
194 bin_n = bin(node)
196 bin_n = bin(node)
195 except TypeError:
197 except TypeError:
196 warn(_("node '%s' is not well formed") % node)
198 warn(_("node '%s' is not well formed") % node)
197 continue
199 continue
198 if bin_n not in self.changelog.nodemap:
200 if bin_n not in self.changelog.nodemap:
199 warn(_("tag '%s' refers to unknown node") % key)
201 warn(_("tag '%s' refers to unknown node") % key)
200 continue
202 continue
201
203
202 h = []
204 h = []
203 if key in filetags:
205 if key in filetags:
204 n, h = filetags[key]
206 n, h = filetags[key]
205 h.append(n)
207 h.append(n)
206 filetags[key] = (bin_n, h)
208 filetags[key] = (bin_n, h)
207
209
208 for k, nh in filetags.items():
210 for k, nh in filetags.items():
209 if k not in globaltags:
211 if k not in globaltags:
210 globaltags[k] = nh
212 globaltags[k] = nh
211 continue
213 continue
212 # we prefer the global tag if:
214 # we prefer the global tag if:
213 # it supercedes us OR
215 # it supercedes us OR
214 # mutual supercedes and it has a higher rank
216 # mutual supercedes and it has a higher rank
215 # otherwise we win because we're tip-most
217 # otherwise we win because we're tip-most
216 an, ah = nh
218 an, ah = nh
217 bn, bh = globaltags[k]
219 bn, bh = globaltags[k]
218 if (bn != an and an in bh and
220 if (bn != an and an in bh and
219 (bn not in ah or len(bh) > len(ah))):
221 (bn not in ah or len(bh) > len(ah))):
220 an = bn
222 an = bn
221 ah.extend([n for n in bh if n not in ah])
223 ah.extend([n for n in bh if n not in ah])
222 globaltags[k] = an, ah
224 globaltags[k] = an, ah
223
225
224 # read the tags file from each head, ending with the tip
226 # read the tags file from each head, ending with the tip
225 f = None
227 f = None
226 for rev, node, fnode in self._hgtagsnodes():
228 for rev, node, fnode in self._hgtagsnodes():
227 f = (f and f.filectx(fnode) or
229 f = (f and f.filectx(fnode) or
228 self.filectx('.hgtags', fileid=fnode))
230 self.filectx('.hgtags', fileid=fnode))
229 readtags(f.data().splitlines(), f)
231 readtags(f.data().splitlines(), f)
230
232
231 try:
233 try:
232 data = util.fromlocal(self.opener("localtags").read())
234 data = util.fromlocal(self.opener("localtags").read())
233 # localtags are stored in the local character set
235 # localtags are stored in the local character set
234 # while the internal tag table is stored in UTF-8
236 # while the internal tag table is stored in UTF-8
235 readtags(data.splitlines(), "localtags")
237 readtags(data.splitlines(), "localtags")
236 except IOError:
238 except IOError:
237 pass
239 pass
238
240
239 self.tagscache = {}
241 self.tagscache = {}
240 for k,nh in globaltags.items():
242 for k,nh in globaltags.items():
241 n = nh[0]
243 n = nh[0]
242 if n != nullid:
244 if n != nullid:
243 self.tagscache[k] = n
245 self.tagscache[k] = n
244 self.tagscache['tip'] = self.changelog.tip()
246 self.tagscache['tip'] = self.changelog.tip()
245
247
246 return self.tagscache
248 return self.tagscache
247
249
248 def _hgtagsnodes(self):
250 def _hgtagsnodes(self):
249 heads = self.heads()
251 heads = self.heads()
250 heads.reverse()
252 heads.reverse()
251 last = {}
253 last = {}
252 ret = []
254 ret = []
253 for node in heads:
255 for node in heads:
254 c = self.changectx(node)
256 c = self.changectx(node)
255 rev = c.rev()
257 rev = c.rev()
256 try:
258 try:
257 fnode = c.filenode('.hgtags')
259 fnode = c.filenode('.hgtags')
258 except revlog.LookupError:
260 except revlog.LookupError:
259 continue
261 continue
260 ret.append((rev, node, fnode))
262 ret.append((rev, node, fnode))
261 if fnode in last:
263 if fnode in last:
262 ret[last[fnode]] = None
264 ret[last[fnode]] = None
263 last[fnode] = len(ret) - 1
265 last[fnode] = len(ret) - 1
264 return [item for item in ret if item]
266 return [item for item in ret if item]
265
267
266 def tagslist(self):
268 def tagslist(self):
267 '''return a list of tags ordered by revision'''
269 '''return a list of tags ordered by revision'''
268 l = []
270 l = []
269 for t, n in self.tags().items():
271 for t, n in self.tags().items():
270 try:
272 try:
271 r = self.changelog.rev(n)
273 r = self.changelog.rev(n)
272 except:
274 except:
273 r = -2 # sort to the beginning of the list if unknown
275 r = -2 # sort to the beginning of the list if unknown
274 l.append((r, t, n))
276 l.append((r, t, n))
275 l.sort()
277 l.sort()
276 return [(t, n) for r, t, n in l]
278 return [(t, n) for r, t, n in l]
277
279
278 def nodetags(self, node):
280 def nodetags(self, node):
279 '''return the tags associated with a node'''
281 '''return the tags associated with a node'''
280 if not self.nodetagscache:
282 if not self.nodetagscache:
281 self.nodetagscache = {}
283 self.nodetagscache = {}
282 for t, n in self.tags().items():
284 for t, n in self.tags().items():
283 self.nodetagscache.setdefault(n, []).append(t)
285 self.nodetagscache.setdefault(n, []).append(t)
284 return self.nodetagscache.get(node, [])
286 return self.nodetagscache.get(node, [])
285
287
286 def _branchtags(self):
288 def _branchtags(self):
287 partial, last, lrev = self._readbranchcache()
289 partial, last, lrev = self._readbranchcache()
288
290
289 tiprev = self.changelog.count() - 1
291 tiprev = self.changelog.count() - 1
290 if lrev != tiprev:
292 if lrev != tiprev:
291 self._updatebranchcache(partial, lrev+1, tiprev+1)
293 self._updatebranchcache(partial, lrev+1, tiprev+1)
292 self._writebranchcache(partial, self.changelog.tip(), tiprev)
294 self._writebranchcache(partial, self.changelog.tip(), tiprev)
293
295
294 return partial
296 return partial
295
297
296 def branchtags(self):
298 def branchtags(self):
297 if self.branchcache is not None:
299 if self.branchcache is not None:
298 return self.branchcache
300 return self.branchcache
299
301
300 self.branchcache = {} # avoid recursion in changectx
302 self.branchcache = {} # avoid recursion in changectx
301 partial = self._branchtags()
303 partial = self._branchtags()
302
304
303 # the branch cache is stored on disk as UTF-8, but in the local
305 # the branch cache is stored on disk as UTF-8, but in the local
304 # charset internally
306 # charset internally
305 for k, v in partial.items():
307 for k, v in partial.items():
306 self.branchcache[util.tolocal(k)] = v
308 self.branchcache[util.tolocal(k)] = v
307 return self.branchcache
309 return self.branchcache
308
310
309 def _readbranchcache(self):
311 def _readbranchcache(self):
310 partial = {}
312 partial = {}
311 try:
313 try:
312 f = self.opener("branch.cache")
314 f = self.opener("branch.cache")
313 lines = f.read().split('\n')
315 lines = f.read().split('\n')
314 f.close()
316 f.close()
315 except (IOError, OSError):
317 except (IOError, OSError):
316 return {}, nullid, nullrev
318 return {}, nullid, nullrev
317
319
318 try:
320 try:
319 last, lrev = lines.pop(0).split(" ", 1)
321 last, lrev = lines.pop(0).split(" ", 1)
320 last, lrev = bin(last), int(lrev)
322 last, lrev = bin(last), int(lrev)
321 if not (lrev < self.changelog.count() and
323 if not (lrev < self.changelog.count() and
322 self.changelog.node(lrev) == last): # sanity check
324 self.changelog.node(lrev) == last): # sanity check
323 # invalidate the cache
325 # invalidate the cache
324 raise ValueError('Invalid branch cache: unknown tip')
326 raise ValueError('Invalid branch cache: unknown tip')
325 for l in lines:
327 for l in lines:
326 if not l: continue
328 if not l: continue
327 node, label = l.split(" ", 1)
329 node, label = l.split(" ", 1)
328 partial[label.strip()] = bin(node)
330 partial[label.strip()] = bin(node)
329 except (KeyboardInterrupt, util.SignalInterrupt):
331 except (KeyboardInterrupt, util.SignalInterrupt):
330 raise
332 raise
331 except Exception, inst:
333 except Exception, inst:
332 if self.ui.debugflag:
334 if self.ui.debugflag:
333 self.ui.warn(str(inst), '\n')
335 self.ui.warn(str(inst), '\n')
334 partial, last, lrev = {}, nullid, nullrev
336 partial, last, lrev = {}, nullid, nullrev
335 return partial, last, lrev
337 return partial, last, lrev
336
338
337 def _writebranchcache(self, branches, tip, tiprev):
339 def _writebranchcache(self, branches, tip, tiprev):
338 try:
340 try:
339 f = self.opener("branch.cache", "w", atomictemp=True)
341 f = self.opener("branch.cache", "w", atomictemp=True)
340 f.write("%s %s\n" % (hex(tip), tiprev))
342 f.write("%s %s\n" % (hex(tip), tiprev))
341 for label, node in branches.iteritems():
343 for label, node in branches.iteritems():
342 f.write("%s %s\n" % (hex(node), label))
344 f.write("%s %s\n" % (hex(node), label))
343 f.rename()
345 f.rename()
344 except (IOError, OSError):
346 except (IOError, OSError):
345 pass
347 pass
346
348
347 def _updatebranchcache(self, partial, start, end):
349 def _updatebranchcache(self, partial, start, end):
348 for r in xrange(start, end):
350 for r in xrange(start, end):
349 c = self.changectx(r)
351 c = self.changectx(r)
350 b = c.branch()
352 b = c.branch()
351 partial[b] = c.node()
353 partial[b] = c.node()
352
354
353 def lookup(self, key):
355 def lookup(self, key):
354 if key == '.':
356 if key == '.':
355 key, second = self.dirstate.parents()
357 key, second = self.dirstate.parents()
356 if key == nullid:
358 if key == nullid:
357 raise repo.RepoError(_("no revision checked out"))
359 raise repo.RepoError(_("no revision checked out"))
358 if second != nullid:
360 if second != nullid:
359 self.ui.warn(_("warning: working directory has two parents, "
361 self.ui.warn(_("warning: working directory has two parents, "
360 "tag '.' uses the first\n"))
362 "tag '.' uses the first\n"))
361 elif key == 'null':
363 elif key == 'null':
362 return nullid
364 return nullid
363 n = self.changelog._match(key)
365 n = self.changelog._match(key)
364 if n:
366 if n:
365 return n
367 return n
366 if key in self.tags():
368 if key in self.tags():
367 return self.tags()[key]
369 return self.tags()[key]
368 if key in self.branchtags():
370 if key in self.branchtags():
369 return self.branchtags()[key]
371 return self.branchtags()[key]
370 n = self.changelog._partialmatch(key)
372 n = self.changelog._partialmatch(key)
371 if n:
373 if n:
372 return n
374 return n
373 raise repo.RepoError(_("unknown revision '%s'") % key)
375 raise repo.RepoError(_("unknown revision '%s'") % key)
374
376
375 def dev(self):
377 def dev(self):
376 return os.lstat(self.path).st_dev
378 return os.lstat(self.path).st_dev
377
379
378 def local(self):
380 def local(self):
379 return True
381 return True
380
382
381 def join(self, f):
383 def join(self, f):
382 return os.path.join(self.path, f)
384 return os.path.join(self.path, f)
383
385
384 def sjoin(self, f):
386 def sjoin(self, f):
385 f = self.encodefn(f)
387 f = self.encodefn(f)
386 return os.path.join(self.spath, f)
388 return os.path.join(self.spath, f)
387
389
388 def wjoin(self, f):
390 def wjoin(self, f):
389 return os.path.join(self.root, f)
391 return os.path.join(self.root, f)
390
392
391 def file(self, f):
393 def file(self, f):
392 if f[0] == '/':
394 if f[0] == '/':
393 f = f[1:]
395 f = f[1:]
394 return filelog.filelog(self.sopener, f)
396 return filelog.filelog(self.sopener, f)
395
397
396 def changectx(self, changeid=None):
398 def changectx(self, changeid=None):
397 return context.changectx(self, changeid)
399 return context.changectx(self, changeid)
398
400
399 def workingctx(self):
401 def workingctx(self):
400 return context.workingctx(self)
402 return context.workingctx(self)
401
403
402 def parents(self, changeid=None):
404 def parents(self, changeid=None):
403 '''
405 '''
404 get list of changectxs for parents of changeid or working directory
406 get list of changectxs for parents of changeid or working directory
405 '''
407 '''
406 if changeid is None:
408 if changeid is None:
407 pl = self.dirstate.parents()
409 pl = self.dirstate.parents()
408 else:
410 else:
409 n = self.changelog.lookup(changeid)
411 n = self.changelog.lookup(changeid)
410 pl = self.changelog.parents(n)
412 pl = self.changelog.parents(n)
411 if pl[1] == nullid:
413 if pl[1] == nullid:
412 return [self.changectx(pl[0])]
414 return [self.changectx(pl[0])]
413 return [self.changectx(pl[0]), self.changectx(pl[1])]
415 return [self.changectx(pl[0]), self.changectx(pl[1])]
414
416
415 def filectx(self, path, changeid=None, fileid=None):
417 def filectx(self, path, changeid=None, fileid=None):
416 """changeid can be a changeset revision, node, or tag.
418 """changeid can be a changeset revision, node, or tag.
417 fileid can be a file revision or node."""
419 fileid can be a file revision or node."""
418 return context.filectx(self, path, changeid, fileid)
420 return context.filectx(self, path, changeid, fileid)
419
421
420 def getcwd(self):
422 def getcwd(self):
421 return self.dirstate.getcwd()
423 return self.dirstate.getcwd()
422
424
423 def pathto(self, f, cwd=None):
425 def pathto(self, f, cwd=None):
424 return self.dirstate.pathto(f, cwd)
426 return self.dirstate.pathto(f, cwd)
425
427
426 def wfile(self, f, mode='r'):
428 def wfile(self, f, mode='r'):
427 return self.wopener(f, mode)
429 return self.wopener(f, mode)
428
430
429 def _link(self, f):
431 def _link(self, f):
430 return os.path.islink(self.wjoin(f))
432 return os.path.islink(self.wjoin(f))
431
433
432 def _filter(self, filter, filename, data):
434 def _filter(self, filter, filename, data):
433 if filter not in self.filterpats:
435 if filter not in self.filterpats:
434 l = []
436 l = []
435 for pat, cmd in self.ui.configitems(filter):
437 for pat, cmd in self.ui.configitems(filter):
436 mf = util.matcher(self.root, "", [pat], [], [])[1]
438 mf = util.matcher(self.root, "", [pat], [], [])[1]
437 l.append((mf, cmd))
439 l.append((mf, cmd))
438 self.filterpats[filter] = l
440 self.filterpats[filter] = l
439
441
440 for mf, cmd in self.filterpats[filter]:
442 for mf, cmd in self.filterpats[filter]:
441 if mf(filename):
443 if mf(filename):
442 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
444 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
443 data = util.filter(data, cmd)
445 data = util.filter(data, cmd)
444 break
446 break
445
447
446 return data
448 return data
447
449
448 def wread(self, filename):
450 def wread(self, filename):
449 if self._link(filename):
451 if self._link(filename):
450 data = os.readlink(self.wjoin(filename))
452 data = os.readlink(self.wjoin(filename))
451 else:
453 else:
452 data = self.wopener(filename, 'r').read()
454 data = self.wopener(filename, 'r').read()
453 return self._filter("encode", filename, data)
455 return self._filter("encode", filename, data)
454
456
455 def wwrite(self, filename, data, flags):
457 def wwrite(self, filename, data, flags):
456 data = self._filter("decode", filename, data)
458 data = self._filter("decode", filename, data)
457 if "l" in flags:
459 if "l" in flags:
458 f = self.wjoin(filename)
460 f = self.wjoin(filename)
459 try:
461 try:
460 os.unlink(f)
462 os.unlink(f)
461 except OSError:
463 except OSError:
462 pass
464 pass
463 d = os.path.dirname(f)
465 d = os.path.dirname(f)
464 if not os.path.exists(d):
466 if not os.path.exists(d):
465 os.makedirs(d)
467 os.makedirs(d)
466 os.symlink(data, f)
468 os.symlink(data, f)
467 else:
469 else:
468 try:
470 try:
469 if self._link(filename):
471 if self._link(filename):
470 os.unlink(self.wjoin(filename))
472 os.unlink(self.wjoin(filename))
471 except OSError:
473 except OSError:
472 pass
474 pass
473 self.wopener(filename, 'w').write(data)
475 self.wopener(filename, 'w').write(data)
474 util.set_exec(self.wjoin(filename), "x" in flags)
476 util.set_exec(self.wjoin(filename), "x" in flags)
475
477
476 def wwritedata(self, filename, data):
478 def wwritedata(self, filename, data):
477 return self._filter("decode", filename, data)
479 return self._filter("decode", filename, data)
478
480
479 def transaction(self):
481 def transaction(self):
480 tr = self.transhandle
482 tr = self.transhandle
481 if tr != None and tr.running():
483 if tr != None and tr.running():
482 return tr.nest()
484 return tr.nest()
483
485
484 # save dirstate for rollback
486 # save dirstate for rollback
485 try:
487 try:
486 ds = self.opener("dirstate").read()
488 ds = self.opener("dirstate").read()
487 except IOError:
489 except IOError:
488 ds = ""
490 ds = ""
489 self.opener("journal.dirstate", "w").write(ds)
491 self.opener("journal.dirstate", "w").write(ds)
490
492
491 renames = [(self.sjoin("journal"), self.sjoin("undo")),
493 renames = [(self.sjoin("journal"), self.sjoin("undo")),
492 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
494 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
493 tr = transaction.transaction(self.ui.warn, self.sopener,
495 tr = transaction.transaction(self.ui.warn, self.sopener,
494 self.sjoin("journal"),
496 self.sjoin("journal"),
495 aftertrans(renames))
497 aftertrans(renames))
496 self.transhandle = tr
498 self.transhandle = tr
497 return tr
499 return tr
498
500
499 def recover(self):
501 def recover(self):
500 l = self.lock()
502 l = self.lock()
501 if os.path.exists(self.sjoin("journal")):
503 if os.path.exists(self.sjoin("journal")):
502 self.ui.status(_("rolling back interrupted transaction\n"))
504 self.ui.status(_("rolling back interrupted transaction\n"))
503 transaction.rollback(self.sopener, self.sjoin("journal"))
505 transaction.rollback(self.sopener, self.sjoin("journal"))
504 self.invalidate()
506 self.invalidate()
505 return True
507 return True
506 else:
508 else:
507 self.ui.warn(_("no interrupted transaction available\n"))
509 self.ui.warn(_("no interrupted transaction available\n"))
508 return False
510 return False
509
511
510 def rollback(self, wlock=None, lock=None):
512 def rollback(self, wlock=None, lock=None):
511 if not wlock:
513 if not wlock:
512 wlock = self.wlock()
514 wlock = self.wlock()
513 if not lock:
515 if not lock:
514 lock = self.lock()
516 lock = self.lock()
515 if os.path.exists(self.sjoin("undo")):
517 if os.path.exists(self.sjoin("undo")):
516 self.ui.status(_("rolling back last transaction\n"))
518 self.ui.status(_("rolling back last transaction\n"))
517 transaction.rollback(self.sopener, self.sjoin("undo"))
519 transaction.rollback(self.sopener, self.sjoin("undo"))
518 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
520 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
519 self.invalidate()
521 self.invalidate()
520 self.dirstate.invalidate()
522 self.dirstate.invalidate()
521 else:
523 else:
522 self.ui.warn(_("no rollback information available\n"))
524 self.ui.warn(_("no rollback information available\n"))
523
525
524 def invalidate(self):
526 def invalidate(self):
525 for a in "changelog manifest".split():
527 for a in "changelog manifest".split():
526 if hasattr(self, a):
528 if hasattr(self, a):
527 self.__delattr__(a)
529 self.__delattr__(a)
528 self.tagscache = None
530 self.tagscache = None
529 self.nodetagscache = None
531 self.nodetagscache = None
530
532
531 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
533 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
532 desc=None):
534 desc=None):
533 try:
535 try:
534 l = lock.lock(lockname, 0, releasefn, desc=desc)
536 l = lock.lock(lockname, 0, releasefn, desc=desc)
535 except lock.LockHeld, inst:
537 except lock.LockHeld, inst:
536 if not wait:
538 if not wait:
537 raise
539 raise
538 self.ui.warn(_("waiting for lock on %s held by %r\n") %
540 self.ui.warn(_("waiting for lock on %s held by %r\n") %
539 (desc, inst.locker))
541 (desc, inst.locker))
540 # default to 600 seconds timeout
542 # default to 600 seconds timeout
541 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
543 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
542 releasefn, desc=desc)
544 releasefn, desc=desc)
543 if acquirefn:
545 if acquirefn:
544 acquirefn()
546 acquirefn()
545 return l
547 return l
546
548
547 def lock(self, wait=1):
549 def lock(self, wait=1):
548 return self.do_lock(self.sjoin("lock"), wait,
550 return self.do_lock(self.sjoin("lock"), wait,
549 acquirefn=self.invalidate,
551 acquirefn=self.invalidate,
550 desc=_('repository %s') % self.origroot)
552 desc=_('repository %s') % self.origroot)
551
553
552 def wlock(self, wait=1):
554 def wlock(self, wait=1):
553 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
555 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
554 self.dirstate.invalidate,
556 self.dirstate.invalidate,
555 desc=_('working directory of %s') % self.origroot)
557 desc=_('working directory of %s') % self.origroot)
556
558
557 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
559 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
558 """
560 """
559 commit an individual file as part of a larger transaction
561 commit an individual file as part of a larger transaction
560 """
562 """
561
563
562 t = self.wread(fn)
564 t = self.wread(fn)
563 fl = self.file(fn)
565 fl = self.file(fn)
564 fp1 = manifest1.get(fn, nullid)
566 fp1 = manifest1.get(fn, nullid)
565 fp2 = manifest2.get(fn, nullid)
567 fp2 = manifest2.get(fn, nullid)
566
568
567 meta = {}
569 meta = {}
568 cp = self.dirstate.copied(fn)
570 cp = self.dirstate.copied(fn)
569 if cp:
571 if cp:
570 # Mark the new revision of this file as a copy of another
572 # Mark the new revision of this file as a copy of another
571 # file. This copy data will effectively act as a parent
573 # file. This copy data will effectively act as a parent
572 # of this new revision. If this is a merge, the first
574 # of this new revision. If this is a merge, the first
573 # parent will be the nullid (meaning "look up the copy data")
575 # parent will be the nullid (meaning "look up the copy data")
574 # and the second one will be the other parent. For example:
576 # and the second one will be the other parent. For example:
575 #
577 #
576 # 0 --- 1 --- 3 rev1 changes file foo
578 # 0 --- 1 --- 3 rev1 changes file foo
577 # \ / rev2 renames foo to bar and changes it
579 # \ / rev2 renames foo to bar and changes it
578 # \- 2 -/ rev3 should have bar with all changes and
580 # \- 2 -/ rev3 should have bar with all changes and
579 # should record that bar descends from
581 # should record that bar descends from
580 # bar in rev2 and foo in rev1
582 # bar in rev2 and foo in rev1
581 #
583 #
582 # this allows this merge to succeed:
584 # this allows this merge to succeed:
583 #
585 #
584 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
586 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
585 # \ / merging rev3 and rev4 should use bar@rev2
587 # \ / merging rev3 and rev4 should use bar@rev2
586 # \- 2 --- 4 as the merge base
588 # \- 2 --- 4 as the merge base
587 #
589 #
588 meta["copy"] = cp
590 meta["copy"] = cp
589 if not manifest2: # not a branch merge
591 if not manifest2: # not a branch merge
590 meta["copyrev"] = hex(manifest1.get(cp, nullid))
592 meta["copyrev"] = hex(manifest1.get(cp, nullid))
591 fp2 = nullid
593 fp2 = nullid
592 elif fp2 != nullid: # copied on remote side
594 elif fp2 != nullid: # copied on remote side
593 meta["copyrev"] = hex(manifest1.get(cp, nullid))
595 meta["copyrev"] = hex(manifest1.get(cp, nullid))
594 elif fp1 != nullid: # copied on local side, reversed
596 elif fp1 != nullid: # copied on local side, reversed
595 meta["copyrev"] = hex(manifest2.get(cp))
597 meta["copyrev"] = hex(manifest2.get(cp))
596 fp2 = fp1
598 fp2 = fp1
597 else: # directory rename
599 else: # directory rename
598 meta["copyrev"] = hex(manifest1.get(cp, nullid))
600 meta["copyrev"] = hex(manifest1.get(cp, nullid))
599 self.ui.debug(_(" %s: copy %s:%s\n") %
601 self.ui.debug(_(" %s: copy %s:%s\n") %
600 (fn, cp, meta["copyrev"]))
602 (fn, cp, meta["copyrev"]))
601 fp1 = nullid
603 fp1 = nullid
602 elif fp2 != nullid:
604 elif fp2 != nullid:
603 # is one parent an ancestor of the other?
605 # is one parent an ancestor of the other?
604 fpa = fl.ancestor(fp1, fp2)
606 fpa = fl.ancestor(fp1, fp2)
605 if fpa == fp1:
607 if fpa == fp1:
606 fp1, fp2 = fp2, nullid
608 fp1, fp2 = fp2, nullid
607 elif fpa == fp2:
609 elif fpa == fp2:
608 fp2 = nullid
610 fp2 = nullid
609
611
610 # is the file unmodified from the parent? report existing entry
612 # is the file unmodified from the parent? report existing entry
611 if fp2 == nullid and not fl.cmp(fp1, t):
613 if fp2 == nullid and not fl.cmp(fp1, t):
612 return fp1
614 return fp1
613
615
614 changelist.append(fn)
616 changelist.append(fn)
615 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
617 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
616
618
617 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
619 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
618 if p1 is None:
620 if p1 is None:
619 p1, p2 = self.dirstate.parents()
621 p1, p2 = self.dirstate.parents()
620 return self.commit(files=files, text=text, user=user, date=date,
622 return self.commit(files=files, text=text, user=user, date=date,
621 p1=p1, p2=p2, wlock=wlock, extra=extra)
623 p1=p1, p2=p2, wlock=wlock, extra=extra)
622
624
623 def commit(self, files=None, text="", user=None, date=None,
625 def commit(self, files=None, text="", user=None, date=None,
624 match=util.always, force=False, lock=None, wlock=None,
626 match=util.always, force=False, lock=None, wlock=None,
625 force_editor=False, p1=None, p2=None, extra={}):
627 force_editor=False, p1=None, p2=None, extra={}):
626
628
627 commit = []
629 commit = []
628 remove = []
630 remove = []
629 changed = []
631 changed = []
630 use_dirstate = (p1 is None) # not rawcommit
632 use_dirstate = (p1 is None) # not rawcommit
631 extra = extra.copy()
633 extra = extra.copy()
632
634
633 if use_dirstate:
635 if use_dirstate:
634 if files:
636 if files:
635 for f in files:
637 for f in files:
636 s = self.dirstate.state(f)
638 s = self.dirstate.state(f)
637 if s in 'nmai':
639 if s in 'nmai':
638 commit.append(f)
640 commit.append(f)
639 elif s == 'r':
641 elif s == 'r':
640 remove.append(f)
642 remove.append(f)
641 else:
643 else:
642 self.ui.warn(_("%s not tracked!\n") % f)
644 self.ui.warn(_("%s not tracked!\n") % f)
643 else:
645 else:
644 changes = self.status(match=match)[:5]
646 changes = self.status(match=match)[:5]
645 modified, added, removed, deleted, unknown = changes
647 modified, added, removed, deleted, unknown = changes
646 commit = modified + added
648 commit = modified + added
647 remove = removed
649 remove = removed
648 else:
650 else:
649 commit = files
651 commit = files
650
652
651 if use_dirstate:
653 if use_dirstate:
652 p1, p2 = self.dirstate.parents()
654 p1, p2 = self.dirstate.parents()
653 update_dirstate = True
655 update_dirstate = True
654 else:
656 else:
655 p1, p2 = p1, p2 or nullid
657 p1, p2 = p1, p2 or nullid
656 update_dirstate = (self.dirstate.parents()[0] == p1)
658 update_dirstate = (self.dirstate.parents()[0] == p1)
657
659
658 c1 = self.changelog.read(p1)
660 c1 = self.changelog.read(p1)
659 c2 = self.changelog.read(p2)
661 c2 = self.changelog.read(p2)
660 m1 = self.manifest.read(c1[0]).copy()
662 m1 = self.manifest.read(c1[0]).copy()
661 m2 = self.manifest.read(c2[0])
663 m2 = self.manifest.read(c2[0])
662
664
663 if use_dirstate:
665 if use_dirstate:
664 branchname = self.workingctx().branch()
666 branchname = self.workingctx().branch()
665 try:
667 try:
666 branchname = branchname.decode('UTF-8').encode('UTF-8')
668 branchname = branchname.decode('UTF-8').encode('UTF-8')
667 except UnicodeDecodeError:
669 except UnicodeDecodeError:
668 raise util.Abort(_('branch name not in UTF-8!'))
670 raise util.Abort(_('branch name not in UTF-8!'))
669 else:
671 else:
670 branchname = ""
672 branchname = ""
671
673
672 if use_dirstate:
674 if use_dirstate:
673 oldname = c1[5].get("branch") # stored in UTF-8
675 oldname = c1[5].get("branch") # stored in UTF-8
674 if (not commit and not remove and not force and p2 == nullid
676 if (not commit and not remove and not force and p2 == nullid
675 and branchname == oldname):
677 and branchname == oldname):
676 self.ui.status(_("nothing changed\n"))
678 self.ui.status(_("nothing changed\n"))
677 return None
679 return None
678
680
679 xp1 = hex(p1)
681 xp1 = hex(p1)
680 if p2 == nullid: xp2 = ''
682 if p2 == nullid: xp2 = ''
681 else: xp2 = hex(p2)
683 else: xp2 = hex(p2)
682
684
683 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
685 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
684
686
685 if not wlock:
687 if not wlock:
686 wlock = self.wlock()
688 wlock = self.wlock()
687 if not lock:
689 if not lock:
688 lock = self.lock()
690 lock = self.lock()
689 tr = self.transaction()
691 tr = self.transaction()
690
692
691 # check in files
693 # check in files
692 new = {}
694 new = {}
693 linkrev = self.changelog.count()
695 linkrev = self.changelog.count()
694 commit.sort()
696 commit.sort()
695 is_exec = util.execfunc(self.root, m1.execf)
697 is_exec = util.execfunc(self.root, m1.execf)
696 is_link = util.linkfunc(self.root, m1.linkf)
698 is_link = util.linkfunc(self.root, m1.linkf)
697 for f in commit:
699 for f in commit:
698 self.ui.note(f + "\n")
700 self.ui.note(f + "\n")
699 try:
701 try:
700 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
702 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
701 new_exec = is_exec(f)
703 new_exec = is_exec(f)
702 new_link = is_link(f)
704 new_link = is_link(f)
703 if not changed or changed[-1] != f:
705 if not changed or changed[-1] != f:
704 # mention the file in the changelog if some flag changed,
706 # mention the file in the changelog if some flag changed,
705 # even if there was no content change.
707 # even if there was no content change.
706 old_exec = m1.execf(f)
708 old_exec = m1.execf(f)
707 old_link = m1.linkf(f)
709 old_link = m1.linkf(f)
708 if old_exec != new_exec or old_link != new_link:
710 if old_exec != new_exec or old_link != new_link:
709 changed.append(f)
711 changed.append(f)
710 m1.set(f, new_exec, new_link)
712 m1.set(f, new_exec, new_link)
711 except (OSError, IOError):
713 except (OSError, IOError):
712 if use_dirstate:
714 if use_dirstate:
713 self.ui.warn(_("trouble committing %s!\n") % f)
715 self.ui.warn(_("trouble committing %s!\n") % f)
714 raise
716 raise
715 else:
717 else:
716 remove.append(f)
718 remove.append(f)
717
719
718 # update manifest
720 # update manifest
719 m1.update(new)
721 m1.update(new)
720 remove.sort()
722 remove.sort()
721 removed = []
723 removed = []
722
724
723 for f in remove:
725 for f in remove:
724 if f in m1:
726 if f in m1:
725 del m1[f]
727 del m1[f]
726 removed.append(f)
728 removed.append(f)
727 elif f in m2:
729 elif f in m2:
728 removed.append(f)
730 removed.append(f)
729 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
731 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
730
732
731 # add changeset
733 # add changeset
732 new = new.keys()
734 new = new.keys()
733 new.sort()
735 new.sort()
734
736
735 user = user or self.ui.username()
737 user = user or self.ui.username()
736 if not text or force_editor:
738 if not text or force_editor:
737 edittext = []
739 edittext = []
738 if text:
740 if text:
739 edittext.append(text)
741 edittext.append(text)
740 edittext.append("")
742 edittext.append("")
741 edittext.append("HG: user: %s" % user)
743 edittext.append("HG: user: %s" % user)
742 if p2 != nullid:
744 if p2 != nullid:
743 edittext.append("HG: branch merge")
745 edittext.append("HG: branch merge")
744 if branchname:
746 if branchname:
745 edittext.append("HG: branch %s" % util.tolocal(branchname))
747 edittext.append("HG: branch %s" % util.tolocal(branchname))
746 edittext.extend(["HG: changed %s" % f for f in changed])
748 edittext.extend(["HG: changed %s" % f for f in changed])
747 edittext.extend(["HG: removed %s" % f for f in removed])
749 edittext.extend(["HG: removed %s" % f for f in removed])
748 if not changed and not remove:
750 if not changed and not remove:
749 edittext.append("HG: no files changed")
751 edittext.append("HG: no files changed")
750 edittext.append("")
752 edittext.append("")
751 # run editor in the repository root
753 # run editor in the repository root
752 olddir = os.getcwd()
754 olddir = os.getcwd()
753 os.chdir(self.root)
755 os.chdir(self.root)
754 text = self.ui.edit("\n".join(edittext), user)
756 text = self.ui.edit("\n".join(edittext), user)
755 os.chdir(olddir)
757 os.chdir(olddir)
756
758
757 lines = [line.rstrip() for line in text.rstrip().splitlines()]
759 lines = [line.rstrip() for line in text.rstrip().splitlines()]
758 while lines and not lines[0]:
760 while lines and not lines[0]:
759 del lines[0]
761 del lines[0]
760 if not lines:
762 if not lines:
761 return None
763 return None
762 text = '\n'.join(lines)
764 text = '\n'.join(lines)
763 if branchname:
765 if branchname:
764 extra["branch"] = branchname
766 extra["branch"] = branchname
765 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
767 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
766 user, date, extra)
768 user, date, extra)
767 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
769 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
768 parent2=xp2)
770 parent2=xp2)
769 tr.close()
771 tr.close()
770
772
771 if self.branchcache and "branch" in extra:
773 if self.branchcache and "branch" in extra:
772 self.branchcache[util.tolocal(extra["branch"])] = n
774 self.branchcache[util.tolocal(extra["branch"])] = n
773
775
774 if use_dirstate or update_dirstate:
776 if use_dirstate or update_dirstate:
775 self.dirstate.setparents(n)
777 self.dirstate.setparents(n)
776 if use_dirstate:
778 if use_dirstate:
777 self.dirstate.update(new, "n")
779 self.dirstate.update(new, "n")
778 self.dirstate.forget(removed)
780 self.dirstate.forget(removed)
779
781
780 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
782 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
781 return n
783 return n
782
784
783 def walk(self, node=None, files=[], match=util.always, badmatch=None):
785 def walk(self, node=None, files=[], match=util.always, badmatch=None):
784 '''
786 '''
785 walk recursively through the directory tree or a given
787 walk recursively through the directory tree or a given
786 changeset, finding all files matched by the match
788 changeset, finding all files matched by the match
787 function
789 function
788
790
789 results are yielded in a tuple (src, filename), where src
791 results are yielded in a tuple (src, filename), where src
790 is one of:
792 is one of:
791 'f' the file was found in the directory tree
793 'f' the file was found in the directory tree
792 'm' the file was only in the dirstate and not in the tree
794 'm' the file was only in the dirstate and not in the tree
793 'b' file was not found and matched badmatch
795 'b' file was not found and matched badmatch
794 '''
796 '''
795
797
796 if node:
798 if node:
797 fdict = dict.fromkeys(files)
799 fdict = dict.fromkeys(files)
798 # for dirstate.walk, files=['.'] means "walk the whole tree".
800 # for dirstate.walk, files=['.'] means "walk the whole tree".
799 # follow that here, too
801 # follow that here, too
800 fdict.pop('.', None)
802 fdict.pop('.', None)
801 mdict = self.manifest.read(self.changelog.read(node)[0])
803 mdict = self.manifest.read(self.changelog.read(node)[0])
802 mfiles = mdict.keys()
804 mfiles = mdict.keys()
803 mfiles.sort()
805 mfiles.sort()
804 for fn in mfiles:
806 for fn in mfiles:
805 for ffn in fdict:
807 for ffn in fdict:
806 # match if the file is the exact name or a directory
808 # match if the file is the exact name or a directory
807 if ffn == fn or fn.startswith("%s/" % ffn):
809 if ffn == fn or fn.startswith("%s/" % ffn):
808 del fdict[ffn]
810 del fdict[ffn]
809 break
811 break
810 if match(fn):
812 if match(fn):
811 yield 'm', fn
813 yield 'm', fn
812 ffiles = fdict.keys()
814 ffiles = fdict.keys()
813 ffiles.sort()
815 ffiles.sort()
814 for fn in ffiles:
816 for fn in ffiles:
815 if badmatch and badmatch(fn):
817 if badmatch and badmatch(fn):
816 if match(fn):
818 if match(fn):
817 yield 'b', fn
819 yield 'b', fn
818 else:
820 else:
819 self.ui.warn(_('%s: No such file in rev %s\n')
821 self.ui.warn(_('%s: No such file in rev %s\n')
820 % (self.pathto(fn), short(node)))
822 % (self.pathto(fn), short(node)))
821 else:
823 else:
822 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
824 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
823 yield src, fn
825 yield src, fn
824
826
825 def status(self, node1=None, node2=None, files=[], match=util.always,
827 def status(self, node1=None, node2=None, files=[], match=util.always,
826 wlock=None, list_ignored=False, list_clean=False):
828 wlock=None, list_ignored=False, list_clean=False):
827 """return status of files between two nodes or node and working directory
829 """return status of files between two nodes or node and working directory
828
830
829 If node1 is None, use the first dirstate parent instead.
831 If node1 is None, use the first dirstate parent instead.
830 If node2 is None, compare node1 with working directory.
832 If node2 is None, compare node1 with working directory.
831 """
833 """
832
834
833 def fcmp(fn, getnode):
835 def fcmp(fn, getnode):
834 t1 = self.wread(fn)
836 t1 = self.wread(fn)
835 return self.file(fn).cmp(getnode(fn), t1)
837 return self.file(fn).cmp(getnode(fn), t1)
836
838
837 def mfmatches(node):
839 def mfmatches(node):
838 change = self.changelog.read(node)
840 change = self.changelog.read(node)
839 mf = self.manifest.read(change[0]).copy()
841 mf = self.manifest.read(change[0]).copy()
840 for fn in mf.keys():
842 for fn in mf.keys():
841 if not match(fn):
843 if not match(fn):
842 del mf[fn]
844 del mf[fn]
843 return mf
845 return mf
844
846
845 modified, added, removed, deleted, unknown = [], [], [], [], []
847 modified, added, removed, deleted, unknown = [], [], [], [], []
846 ignored, clean = [], []
848 ignored, clean = [], []
847
849
848 compareworking = False
850 compareworking = False
849 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
851 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
850 compareworking = True
852 compareworking = True
851
853
852 if not compareworking:
854 if not compareworking:
853 # read the manifest from node1 before the manifest from node2,
855 # read the manifest from node1 before the manifest from node2,
854 # so that we'll hit the manifest cache if we're going through
856 # so that we'll hit the manifest cache if we're going through
855 # all the revisions in parent->child order.
857 # all the revisions in parent->child order.
856 mf1 = mfmatches(node1)
858 mf1 = mfmatches(node1)
857
859
858 mywlock = False
860 mywlock = False
859
861
860 # are we comparing the working directory?
862 # are we comparing the working directory?
861 if not node2:
863 if not node2:
862 (lookup, modified, added, removed, deleted, unknown,
864 (lookup, modified, added, removed, deleted, unknown,
863 ignored, clean) = self.dirstate.status(files, match,
865 ignored, clean) = self.dirstate.status(files, match,
864 list_ignored, list_clean)
866 list_ignored, list_clean)
865
867
866 # are we comparing working dir against its parent?
868 # are we comparing working dir against its parent?
867 if compareworking:
869 if compareworking:
868 if lookup:
870 if lookup:
869 # do a full compare of any files that might have changed
871 # do a full compare of any files that might have changed
870 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
872 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
871 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
873 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
872 nullid)
874 nullid)
873 for f in lookup:
875 for f in lookup:
874 if fcmp(f, getnode):
876 if fcmp(f, getnode):
875 modified.append(f)
877 modified.append(f)
876 else:
878 else:
877 if list_clean:
879 if list_clean:
878 clean.append(f)
880 clean.append(f)
879 if not wlock and not mywlock:
881 if not wlock and not mywlock:
880 mywlock = True
882 mywlock = True
881 try:
883 try:
882 wlock = self.wlock(wait=0)
884 wlock = self.wlock(wait=0)
883 except lock.LockException:
885 except lock.LockException:
884 pass
886 pass
885 if wlock:
887 if wlock:
886 self.dirstate.update([f], "n")
888 self.dirstate.update([f], "n")
887 else:
889 else:
888 # we are comparing working dir against non-parent
890 # we are comparing working dir against non-parent
889 # generate a pseudo-manifest for the working dir
891 # generate a pseudo-manifest for the working dir
890 # XXX: create it in dirstate.py ?
892 # XXX: create it in dirstate.py ?
891 mf2 = mfmatches(self.dirstate.parents()[0])
893 mf2 = mfmatches(self.dirstate.parents()[0])
892 is_exec = util.execfunc(self.root, mf2.execf)
894 is_exec = util.execfunc(self.root, mf2.execf)
893 is_link = util.linkfunc(self.root, mf2.linkf)
895 is_link = util.linkfunc(self.root, mf2.linkf)
894 for f in lookup + modified + added:
896 for f in lookup + modified + added:
895 mf2[f] = ""
897 mf2[f] = ""
896 mf2.set(f, is_exec(f), is_link(f))
898 mf2.set(f, is_exec(f), is_link(f))
897 for f in removed:
899 for f in removed:
898 if f in mf2:
900 if f in mf2:
899 del mf2[f]
901 del mf2[f]
900
902
901 if mywlock and wlock:
903 if mywlock and wlock:
902 wlock.release()
904 wlock.release()
903 else:
905 else:
904 # we are comparing two revisions
906 # we are comparing two revisions
905 mf2 = mfmatches(node2)
907 mf2 = mfmatches(node2)
906
908
907 if not compareworking:
909 if not compareworking:
908 # flush lists from dirstate before comparing manifests
910 # flush lists from dirstate before comparing manifests
909 modified, added, clean = [], [], []
911 modified, added, clean = [], [], []
910
912
911 # make sure to sort the files so we talk to the disk in a
913 # make sure to sort the files so we talk to the disk in a
912 # reasonable order
914 # reasonable order
913 mf2keys = mf2.keys()
915 mf2keys = mf2.keys()
914 mf2keys.sort()
916 mf2keys.sort()
915 getnode = lambda fn: mf1.get(fn, nullid)
917 getnode = lambda fn: mf1.get(fn, nullid)
916 for fn in mf2keys:
918 for fn in mf2keys:
917 if mf1.has_key(fn):
919 if mf1.has_key(fn):
918 if (mf1.flags(fn) != mf2.flags(fn) or
920 if (mf1.flags(fn) != mf2.flags(fn) or
919 (mf1[fn] != mf2[fn] and
921 (mf1[fn] != mf2[fn] and
920 (mf2[fn] != "" or fcmp(fn, getnode)))):
922 (mf2[fn] != "" or fcmp(fn, getnode)))):
921 modified.append(fn)
923 modified.append(fn)
922 elif list_clean:
924 elif list_clean:
923 clean.append(fn)
925 clean.append(fn)
924 del mf1[fn]
926 del mf1[fn]
925 else:
927 else:
926 added.append(fn)
928 added.append(fn)
927
929
928 removed = mf1.keys()
930 removed = mf1.keys()
929
931
930 # sort and return results:
932 # sort and return results:
931 for l in modified, added, removed, deleted, unknown, ignored, clean:
933 for l in modified, added, removed, deleted, unknown, ignored, clean:
932 l.sort()
934 l.sort()
933 return (modified, added, removed, deleted, unknown, ignored, clean)
935 return (modified, added, removed, deleted, unknown, ignored, clean)
934
936
935 def add(self, list, wlock=None):
937 def add(self, list, wlock=None):
936 if not wlock:
938 if not wlock:
937 wlock = self.wlock()
939 wlock = self.wlock()
938 for f in list:
940 for f in list:
939 p = self.wjoin(f)
941 p = self.wjoin(f)
940 try:
942 try:
941 st = os.lstat(p)
943 st = os.lstat(p)
942 except:
944 except:
943 self.ui.warn(_("%s does not exist!\n") % f)
945 self.ui.warn(_("%s does not exist!\n") % f)
944 continue
946 continue
945 if st.st_size > 10000000:
947 if st.st_size > 10000000:
946 self.ui.warn(_("%s: files over 10MB may cause memory and"
948 self.ui.warn(_("%s: files over 10MB may cause memory and"
947 " performance problems\n"
949 " performance problems\n"
948 "(use 'hg revert %s' to unadd the file)\n")
950 "(use 'hg revert %s' to unadd the file)\n")
949 % (f, f))
951 % (f, f))
950 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
952 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
951 self.ui.warn(_("%s not added: only files and symlinks "
953 self.ui.warn(_("%s not added: only files and symlinks "
952 "supported currently\n") % f)
954 "supported currently\n") % f)
953 elif self.dirstate.state(f) in 'an':
955 elif self.dirstate.state(f) in 'an':
954 self.ui.warn(_("%s already tracked!\n") % f)
956 self.ui.warn(_("%s already tracked!\n") % f)
955 else:
957 else:
956 self.dirstate.update([f], "a")
958 self.dirstate.update([f], "a")
957
959
958 def forget(self, list, wlock=None):
960 def forget(self, list, wlock=None):
959 if not wlock:
961 if not wlock:
960 wlock = self.wlock()
962 wlock = self.wlock()
961 for f in list:
963 for f in list:
962 if self.dirstate.state(f) not in 'ai':
964 if self.dirstate.state(f) not in 'ai':
963 self.ui.warn(_("%s not added!\n") % f)
965 self.ui.warn(_("%s not added!\n") % f)
964 else:
966 else:
965 self.dirstate.forget([f])
967 self.dirstate.forget([f])
966
968
967 def remove(self, list, unlink=False, wlock=None):
969 def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
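        # restore files that were marked as removed: fetch their contents
        # from the first parent of the working directory and mark them
        # normal again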
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn(_("%s not removed!\n") % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
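        # record that dest is a copy of source; the destination must
        # already exist in the working directory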
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

    def branches(self, nodes):
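        # for each node, walk back along first parents to the root of its
        # linear branch segment and report a 4-tuple of
        # (head, root, first parent of root, second parent of root)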
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
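        # for each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom and collect the nodes seen at distances 1, 2, 4,
        # 8, ... from top; this lets the caller binary-search the range
        # (see the "do binary search" loop in findincoming)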
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
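        # fetch whatever the remote has that we are missing and add it to
        # this repository; takes the repo lock unless the caller already
        # holds one and passes it in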
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
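        # common setup for push_addchangegroup and push_unbundle: returns
        # (changegroup, remote_heads) when there is something to push, or
        # (None, status) when there is nothing to do or the push is refused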
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
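            # yield the nodes of this revlog whose linkrev points at one of
            # the changesets we are sending, in revision (storage) order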
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
1762 def csmap(x):
1764 def csmap(x):
1763 self.ui.debug(_("add changeset %s\n") % short(x))
1765 self.ui.debug(_("add changeset %s\n") % short(x))
1764 return cl.count()
1766 return cl.count()
1765
1767
1766 def revmap(x):
1768 def revmap(x):
1767 return cl.rev(x)
1769 return cl.rev(x)
1768
1770
1769 if not source:
1771 if not source:
1770 return 0
1772 return 0
1771
1773
1772 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1774 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1773
1775
1774 changesets = files = revisions = 0
1776 changesets = files = revisions = 0
1775
1777
1776 tr = self.transaction()
1778 tr = self.transaction()
1777
1779
1778 # write changelog data to temp files so concurrent readers will not see
1780 # write changelog data to temp files so concurrent readers will not see
1779 # inconsistent view
1781 # inconsistent view
1780 cl = self.changelog
1782 cl = self.changelog
1781 cl.delayupdate()
1783 cl.delayupdate()
1782 oldheads = len(cl.heads())
1784 oldheads = len(cl.heads())
1783
1785
1784 # pull off the changeset group
1786 # pull off the changeset group
1785 self.ui.status(_("adding changesets\n"))
1787 self.ui.status(_("adding changesets\n"))
1786 cor = cl.count() - 1
1788 cor = cl.count() - 1
1787 chunkiter = changegroup.chunkiter(source)
1789 chunkiter = changegroup.chunkiter(source)
1788 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1790 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1789 raise util.Abort(_("received changelog group is empty"))
1791 raise util.Abort(_("received changelog group is empty"))
1790 cnr = cl.count() - 1
1792 cnr = cl.count() - 1
1791 changesets = cnr - cor
1793 changesets = cnr - cor
1792
1794
1793 # pull off the manifest group
1795 # pull off the manifest group
1794 self.ui.status(_("adding manifests\n"))
1796 self.ui.status(_("adding manifests\n"))
1795 chunkiter = changegroup.chunkiter(source)
1797 chunkiter = changegroup.chunkiter(source)
1796 # no need to check for empty manifest group here:
1798 # no need to check for empty manifest group here:
1797 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1799 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1798 # no new manifest will be created and the manifest group will
1800 # no new manifest will be created and the manifest group will
1799 # be empty during the pull
1801 # be empty during the pull
1800 self.manifest.addgroup(chunkiter, revmap, tr)
1802 self.manifest.addgroup(chunkiter, revmap, tr)
1801
1803
1802 # process the files
1804 # process the files
1803 self.ui.status(_("adding file changes\n"))
1805 self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)
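            # the pretxnchangegroup hook above runs while the transaction
            # is still open, so a failing hook rolls back the entire group;
            # a server can gate incoming changes from its hgrc, e.g.
            #   [hooks]
            #   pretxnchangegroup.check = /usr/local/bin/check-changes
            # (the script path is illustrative; any nonzero exit aborts)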

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
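        # wire format consumed below: one status line (integer: 0 = ok,
        # 1 = operation forbidden, 2 = remote lock failed), one line with
        # "<file count> <total bytes>", then for each file a header line
        # of "<name>\0<size>" followed by exactly <size> bytes of raw
        # store data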
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
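        # keep to addchangegroup's "never return 0" convention: report the
        # head count plus one so callers treat the streamed clone as a
        # successful addition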
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
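        # a typical call (illustrative, not taken from this file):
        #   repo.clone(remote, heads=[], stream=True)
        # falls through to pull() below whenever streaming is unavailable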

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
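# (the closure captures only plain tuples, never the repo itself; a
# plausible use, with illustrative names, is as a transaction's "after"
# callback: aftertrans([("journal", "undo")]) renames the journal file to
# the undo file once the transaction commits)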
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True