##// END OF EJS Templates
localrepository: allow rawcommit to commit with an empty message
Bryan O'Sullivan -
r5041:49059086 default
parent child Browse files
Show More
@@ -1,1986 +1,1986 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
class localrepository(repo.repository):
    # NOTE(review): presumably the capability strings advertised to peers
    # over the wire protocol — confirm against the protocol handlers.
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk format requirements this client can read; checked against the
    # repository's 'requires' file in __init__
    supported = ('revlogv1', 'store')
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        Raises repo.RepoError if the repository is missing, already
        exists when create is requested, or declares an unsupported
        format requirement.
        """
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        # note: self.path is rebound from the caller's path to the .hg dir
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)      # opens files under .hg
        self.wopener = util.opener(self.root)     # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog so pre-"requires" clients fail
                # loudly instead of misreading the new layout
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements: a missing 'requires' file means an old
            # plain-revlogv1 repository
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with "store", tracked filenames are encoded on disk
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # legacy layout: store is .hg itself, names used verbatim
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a repo without an hgrc is fine
            pass

        # lazily-filled caches; see tags()/branchtags()/nodetags()/_filter()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        # weakrefs to the live transaction and locks; see transaction()/lock()
        self._transref = self._lockref = self._wlockref = None
    def __getattr__(self, name):
        """Create the expensive changelog/manifest/dirstate attributes
        lazily on first access; subsequent accesses hit the instance
        attribute directly and never reach __getattr__ again."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # side effect: pin the store opener's revlog version to the
            # changelog's, so other revlogs are created compatibly
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch self.changelog first for its defversion side effect above
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
102 def url(self):
102 def url(self):
103 return 'file:' + self.root
103 return 'file:' + self.root
104
104
105 def hook(self, name, throw=False, **args):
105 def hook(self, name, throw=False, **args):
106 return hook.hook(self.ui, self, name, throw, **args)
106 return hook.hook(self.ui, self, name, throw, **args)
107
107
    # characters forbidden in tag names; enforced by _tag()
    tag_disallowed = ':\r\n'
110 def _tag(self, name, node, message, local, user, date, parent=None,
110 def _tag(self, name, node, message, local, user, date, parent=None,
111 extra={}):
111 extra={}):
112 use_dirstate = parent is None
112 use_dirstate = parent is None
113
113
114 for c in self.tag_disallowed:
114 for c in self.tag_disallowed:
115 if c in name:
115 if c in name:
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
117
117
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119
119
120 def writetag(fp, name, munge, prevtags):
120 def writetag(fp, name, munge, prevtags):
121 if prevtags and prevtags[-1] != '\n':
121 if prevtags and prevtags[-1] != '\n':
122 fp.write('\n')
122 fp.write('\n')
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.close()
124 fp.close()
125 self.hook('tag', node=hex(node), tag=name, local=local)
125 self.hook('tag', node=hex(node), tag=name, local=local)
126
126
127 prevtags = ''
127 prevtags = ''
128 if local:
128 if local:
129 try:
129 try:
130 fp = self.opener('localtags', 'r+')
130 fp = self.opener('localtags', 'r+')
131 except IOError, err:
131 except IOError, err:
132 fp = self.opener('localtags', 'a')
132 fp = self.opener('localtags', 'a')
133 else:
133 else:
134 prevtags = fp.read()
134 prevtags = fp.read()
135
135
136 # local tags are stored in the current charset
136 # local tags are stored in the current charset
137 writetag(fp, name, None, prevtags)
137 writetag(fp, name, None, prevtags)
138 return
138 return
139
139
140 if use_dirstate:
140 if use_dirstate:
141 try:
141 try:
142 fp = self.wfile('.hgtags', 'rb+')
142 fp = self.wfile('.hgtags', 'rb+')
143 except IOError, err:
143 except IOError, err:
144 fp = self.wfile('.hgtags', 'ab')
144 fp = self.wfile('.hgtags', 'ab')
145 else:
145 else:
146 prevtags = fp.read()
146 prevtags = fp.read()
147 else:
147 else:
148 try:
148 try:
149 prevtags = self.filectx('.hgtags', parent).data()
149 prevtags = self.filectx('.hgtags', parent).data()
150 except revlog.LookupError:
150 except revlog.LookupError:
151 pass
151 pass
152 fp = self.wfile('.hgtags', 'wb')
152 fp = self.wfile('.hgtags', 'wb')
153
153
154 # committed tags are stored in UTF-8
154 # committed tags are stored in UTF-8
155 writetag(fp, name, util.fromlocal, prevtags)
155 writetag(fp, name, util.fromlocal, prevtags)
156
156
157 if use_dirstate and '.hgtags' not in self.dirstate:
157 if use_dirstate and '.hgtags' not in self.dirstate:
158 self.add(['.hgtags'])
158 self.add(['.hgtags'])
159
159
160 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
160 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
161 extra=extra)
161 extra=extra)
162
162
163 self.hook('tag', node=hex(node), tag=name, local=local)
163 self.hook('tag', node=hex(node), tag=name, local=local)
164
164
165 return tagnode
165 return tagnode
166
166
167 def tag(self, name, node, message, local, user, date):
167 def tag(self, name, node, message, local, user, date):
168 '''tag a revision with a symbolic name.
168 '''tag a revision with a symbolic name.
169
169
170 if local is True, the tag is stored in a per-repository file.
170 if local is True, the tag is stored in a per-repository file.
171 otherwise, it is stored in the .hgtags file, and a new
171 otherwise, it is stored in the .hgtags file, and a new
172 changeset is committed with the change.
172 changeset is committed with the change.
173
173
174 keyword arguments:
174 keyword arguments:
175
175
176 local: whether to store tag in non-version-controlled file
176 local: whether to store tag in non-version-controlled file
177 (default False)
177 (default False)
178
178
179 message: commit message to use if committing
179 message: commit message to use if committing
180
180
181 user: name of user to use if committing
181 user: name of user to use if committing
182
182
183 date: date tuple to use if committing'''
183 date: date tuple to use if committing'''
184
184
185 for x in self.status()[:5]:
185 for x in self.status()[:5]:
186 if '.hgtags' in x:
186 if '.hgtags' in x:
187 raise util.Abort(_('working copy of .hgtags is changed '
187 raise util.Abort(_('working copy of .hgtags is changed '
188 '(please commit .hgtags manually)'))
188 '(please commit .hgtags manually)'))
189
189
190
190
191 self._tag(name, node, message, local, user, date)
191 self._tag(name, node, message, local, user, date)
192
192
    def tags(self):
        '''return a mapping of tag to node'''
        # serve from cache when already computed (reset by invalidate())
        if self.tagscache:
            return self.tagscache

        # tag name -> (node, list of superceded nodes); merged across files
        globaltags = {}

        def readtags(lines, fn):
            """Parse one tags file ('node tagname' per line) into
            filetags, then merge filetags into globaltags."""
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # a later entry for the same tag supercedes the earlier one;
                # keep the history of overridden nodes in h
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible to share state
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        # flatten: drop history lists and nullid entries (deleted tags)
        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        # 'tip' always wins, pointing at the current tip
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head whose .hgtags is
        distinct, oldest head first, ending with the tip; heads sharing
        an identical .hgtags revision are reported only once (last wins).
        """
        heads = self.heads()
        heads.reverse()          # heads() is tip-first; we want tip last
        last = {}                # .hgtags filenode -> index into ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags content already queued: drop the earlier
                # entry by overwriting it with None, filtered out below
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
290 def tagslist(self):
290 def tagslist(self):
291 '''return a list of tags ordered by revision'''
291 '''return a list of tags ordered by revision'''
292 l = []
292 l = []
293 for t, n in self.tags().items():
293 for t, n in self.tags().items():
294 try:
294 try:
295 r = self.changelog.rev(n)
295 r = self.changelog.rev(n)
296 except:
296 except:
297 r = -2 # sort to the beginning of the list if unknown
297 r = -2 # sort to the beginning of the list if unknown
298 l.append((r, t, n))
298 l.append((r, t, n))
299 l.sort()
299 l.sort()
300 return [(t, n) for r, t, n in l]
300 return [(t, n) for r, t, n in l]
301
301
302 def nodetags(self, node):
302 def nodetags(self, node):
303 '''return the tags associated with a node'''
303 '''return the tags associated with a node'''
304 if not self.nodetagscache:
304 if not self.nodetagscache:
305 self.nodetagscache = {}
305 self.nodetagscache = {}
306 for t, n in self.tags().items():
306 for t, n in self.tags().items():
307 self.nodetagscache.setdefault(n, []).append(t)
307 self.nodetagscache.setdefault(n, []).append(t)
308 return self.nodetagscache.get(node, [])
308 return self.nodetagscache.get(node, [])
309
309
    def _branchtags(self):
        """Return the branch -> tip-node map, updating and persisting the
        on-disk branch cache if it is stale."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is behind: fold in revisions lrev+1..tiprev and rewrite
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
    def branchtags(self):
        """Return the cached branch -> tip-node map, in the local charset."""
        if self.branchcache is not None:
            return self.branchcache

        # must assign before calling _branchtags: changectx consults
        # branchcache, and an unset cache would recurse back here
        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
    def _readbranchcache(self):
        """Read .hg/branch.cache.

        Returns (partial, last, lrev): the branch -> node map, plus the
        tip node and revision the cache was valid for.  Any problem
        (missing file, parse error, stale tip) yields an empty cache.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip-hex> <tip-rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<node-hex> <branch-label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any other failure means a corrupt/stale cache: start fresh
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
361 def _writebranchcache(self, branches, tip, tiprev):
361 def _writebranchcache(self, branches, tip, tiprev):
362 try:
362 try:
363 f = self.opener("branch.cache", "w", atomictemp=True)
363 f = self.opener("branch.cache", "w", atomictemp=True)
364 f.write("%s %s\n" % (hex(tip), tiprev))
364 f.write("%s %s\n" % (hex(tip), tiprev))
365 for label, node in branches.iteritems():
365 for label, node in branches.iteritems():
366 f.write("%s %s\n" % (hex(node), label))
366 f.write("%s %s\n" % (hex(node), label))
367 f.rename()
367 f.rename()
368 except (IOError, OSError):
368 except (IOError, OSError):
369 pass
369 pass
370
370
371 def _updatebranchcache(self, partial, start, end):
371 def _updatebranchcache(self, partial, start, end):
372 for r in xrange(start, end):
372 for r in xrange(start, end):
373 c = self.changectx(r)
373 c = self.changectx(r)
374 b = c.branch()
374 b = c.branch()
375 partial[b] = c.node()
375 partial[b] = c.node()
376
376
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: '.' (first working-dir parent), 'null', exact
        changelog match, tag, branch, then unambiguous node prefix.
        Raises repo.RepoError if nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # a 20-byte key is likely a binary node: hexlify it so the
            # error message below is printable
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
404 def dev(self):
404 def dev(self):
405 return os.lstat(self.path).st_dev
405 return os.lstat(self.path).st_dev
406
406
407 def local(self):
407 def local(self):
408 return True
408 return True
409
409
410 def join(self, f):
410 def join(self, f):
411 return os.path.join(self.path, f)
411 return os.path.join(self.path, f)
412
412
413 def sjoin(self, f):
413 def sjoin(self, f):
414 f = self.encodefn(f)
414 f = self.encodefn(f)
415 return os.path.join(self.spath, f)
415 return os.path.join(self.spath, f)
416
416
417 def wjoin(self, f):
417 def wjoin(self, f):
418 return os.path.join(self.root, f)
418 return os.path.join(self.root, f)
419
419
420 def file(self, f):
420 def file(self, f):
421 if f[0] == '/':
421 if f[0] == '/':
422 f = f[1:]
422 f = f[1:]
423 return filelog.filelog(self.sopener, f)
423 return filelog.filelog(self.sopener, f)
424
424
425 def changectx(self, changeid=None):
425 def changectx(self, changeid=None):
426 return context.changectx(self, changeid)
426 return context.changectx(self, changeid)
427
427
428 def workingctx(self):
428 def workingctx(self):
429 return context.workingctx(self)
429 return context.workingctx(self)
430
430
431 def parents(self, changeid=None):
431 def parents(self, changeid=None):
432 '''
432 '''
433 get list of changectxs for parents of changeid or working directory
433 get list of changectxs for parents of changeid or working directory
434 '''
434 '''
435 if changeid is None:
435 if changeid is None:
436 pl = self.dirstate.parents()
436 pl = self.dirstate.parents()
437 else:
437 else:
438 n = self.changelog.lookup(changeid)
438 n = self.changelog.lookup(changeid)
439 pl = self.changelog.parents(n)
439 pl = self.changelog.parents(n)
440 if pl[1] == nullid:
440 if pl[1] == nullid:
441 return [self.changectx(pl[0])]
441 return [self.changectx(pl[0])]
442 return [self.changectx(pl[0]), self.changectx(pl[1])]
442 return [self.changectx(pl[0]), self.changectx(pl[1])]
443
443
444 def filectx(self, path, changeid=None, fileid=None):
444 def filectx(self, path, changeid=None, fileid=None):
445 """changeid can be a changeset revision, node, or tag.
445 """changeid can be a changeset revision, node, or tag.
446 fileid can be a file revision or node."""
446 fileid can be a file revision or node."""
447 return context.filectx(self, path, changeid, fileid)
447 return context.filectx(self, path, changeid, fileid)
448
448
449 def getcwd(self):
449 def getcwd(self):
450 return self.dirstate.getcwd()
450 return self.dirstate.getcwd()
451
451
452 def pathto(self, f, cwd=None):
452 def pathto(self, f, cwd=None):
453 return self.dirstate.pathto(f, cwd)
453 return self.dirstate.pathto(f, cwd)
454
454
455 def wfile(self, f, mode='r'):
455 def wfile(self, f, mode='r'):
456 return self.wopener(f, mode)
456 return self.wopener(f, mode)
457
457
458 def _link(self, f):
458 def _link(self, f):
459 return os.path.islink(self.wjoin(f))
459 return os.path.islink(self.wjoin(f))
460
460
461 def _filter(self, filter, filename, data):
461 def _filter(self, filter, filename, data):
462 if filter not in self.filterpats:
462 if filter not in self.filterpats:
463 l = []
463 l = []
464 for pat, cmd in self.ui.configitems(filter):
464 for pat, cmd in self.ui.configitems(filter):
465 mf = util.matcher(self.root, "", [pat], [], [])[1]
465 mf = util.matcher(self.root, "", [pat], [], [])[1]
466 l.append((mf, cmd))
466 l.append((mf, cmd))
467 self.filterpats[filter] = l
467 self.filterpats[filter] = l
468
468
469 for mf, cmd in self.filterpats[filter]:
469 for mf, cmd in self.filterpats[filter]:
470 if mf(filename):
470 if mf(filename):
471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
472 data = util.filter(data, cmd)
472 data = util.filter(data, cmd)
473 break
473 break
474
474
475 return data
475 return data
476
476
477 def wread(self, filename):
477 def wread(self, filename):
478 if self._link(filename):
478 if self._link(filename):
479 data = os.readlink(self.wjoin(filename))
479 data = os.readlink(self.wjoin(filename))
480 else:
480 else:
481 data = self.wopener(filename, 'r').read()
481 data = self.wopener(filename, 'r').read()
482 return self._filter("encode", filename, data)
482 return self._filter("encode", filename, data)
483
483
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working dir, applying 'decode'
        filters; flags 'l' makes a symlink, 'x' sets the exec bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            # if the file currently is a symlink, remove it first so the
            # write below replaces it with a regular file
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
497 def wwritedata(self, filename, data):
497 def wwritedata(self, filename, data):
498 return self._filter("decode", filename, data)
498 return self._filter("decode", filename, data)
499
499
    def transaction(self):
        """Return a (possibly nested) store transaction.

        Saves the current dirstate to journal.dirstate so rollback() can
        restore it; on close, the journal files are renamed to undo.*.
        """
        # reuse the live transaction, if any, as a nested one
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        # hold only a weakref so an abandoned transaction can be collected
        self._transref = weakref.ref(tr)
        return tr
    def recover(self):
        """Roll back an interrupted transaction, if one is journalled.

        Returns True if a journal was found and rolled back, else False.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            # drop our reference so the weakref'd lock is released
            del l
    def rollback(self):
        """Undo the last committed transaction using the undo files,
        restoring the saved dirstate as well."""
        wlock = lock = None
        try:
            # take wlock before lock (dirstate is restored too)
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            # drop references so the weakref'd locks are released
            del wlock, lock
549 def invalidate(self):
549 def invalidate(self):
550 for a in "changelog manifest".split():
550 for a in "changelog manifest".split():
551 if hasattr(self, a):
551 if hasattr(self, a):
552 self.__delattr__(a)
552 self.__delattr__(a)
553 self.tagscache = None
553 self.tagscache = None
554 self.nodetagscache = None
554 self.nodetagscache = None
555
555
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname'.

        First tries without blocking; if held and wait is true, warns and
        retries with the ui-configured timeout (default 600s).  Calls
        acquirefn (if given) after acquisition and returns the lock.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
571 def lock(self, wait=True):
571 def lock(self, wait=True):
572 if self._lockref and self._lockref():
572 if self._lockref and self._lockref():
573 return self._lockref()
573 return self._lockref()
574
574
575 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
575 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
576 _('repository %s') % self.origroot)
576 _('repository %s') % self.origroot)
577 self._lockref = weakref.ref(l)
577 self._lockref = weakref.ref(l)
578 return l
578 return l
579
579
580 def wlock(self, wait=True):
580 def wlock(self, wait=True):
581 if self._wlockref and self._wlockref():
581 if self._wlockref and self._wlockref():
582 return self._wlockref()
582 return self._wlockref()
583
583
584 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
584 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
585 self.dirstate.invalidate, _('working directory of %s') %
585 self.dirstate.invalidate, _('working directory of %s') %
586 self.origroot)
586 self.origroot)
587 self._wlockref = weakref.ref(l)
587 self._wlockref = weakref.ref(l)
588 return l
588 return l
589
589
590 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
590 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
591 """
591 """
592 commit an individual file as part of a larger transaction
592 commit an individual file as part of a larger transaction
593 """
593 """
594
594
595 t = self.wread(fn)
595 t = self.wread(fn)
596 fl = self.file(fn)
596 fl = self.file(fn)
597 fp1 = manifest1.get(fn, nullid)
597 fp1 = manifest1.get(fn, nullid)
598 fp2 = manifest2.get(fn, nullid)
598 fp2 = manifest2.get(fn, nullid)
599
599
600 meta = {}
600 meta = {}
601 cp = self.dirstate.copied(fn)
601 cp = self.dirstate.copied(fn)
602 if cp:
602 if cp:
603 # Mark the new revision of this file as a copy of another
603 # Mark the new revision of this file as a copy of another
604 # file. This copy data will effectively act as a parent
604 # file. This copy data will effectively act as a parent
605 # of this new revision. If this is a merge, the first
605 # of this new revision. If this is a merge, the first
606 # parent will be the nullid (meaning "look up the copy data")
606 # parent will be the nullid (meaning "look up the copy data")
607 # and the second one will be the other parent. For example:
607 # and the second one will be the other parent. For example:
608 #
608 #
609 # 0 --- 1 --- 3 rev1 changes file foo
609 # 0 --- 1 --- 3 rev1 changes file foo
610 # \ / rev2 renames foo to bar and changes it
610 # \ / rev2 renames foo to bar and changes it
611 # \- 2 -/ rev3 should have bar with all changes and
611 # \- 2 -/ rev3 should have bar with all changes and
612 # should record that bar descends from
612 # should record that bar descends from
613 # bar in rev2 and foo in rev1
613 # bar in rev2 and foo in rev1
614 #
614 #
615 # this allows this merge to succeed:
615 # this allows this merge to succeed:
616 #
616 #
617 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
617 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
618 # \ / merging rev3 and rev4 should use bar@rev2
618 # \ / merging rev3 and rev4 should use bar@rev2
619 # \- 2 --- 4 as the merge base
619 # \- 2 --- 4 as the merge base
620 #
620 #
621 meta["copy"] = cp
621 meta["copy"] = cp
622 if not manifest2: # not a branch merge
622 if not manifest2: # not a branch merge
623 meta["copyrev"] = hex(manifest1.get(cp, nullid))
623 meta["copyrev"] = hex(manifest1.get(cp, nullid))
624 fp2 = nullid
624 fp2 = nullid
625 elif fp2 != nullid: # copied on remote side
625 elif fp2 != nullid: # copied on remote side
626 meta["copyrev"] = hex(manifest1.get(cp, nullid))
626 meta["copyrev"] = hex(manifest1.get(cp, nullid))
627 elif fp1 != nullid: # copied on local side, reversed
627 elif fp1 != nullid: # copied on local side, reversed
628 meta["copyrev"] = hex(manifest2.get(cp))
628 meta["copyrev"] = hex(manifest2.get(cp))
629 fp2 = fp1
629 fp2 = fp1
630 else: # directory rename
630 else: # directory rename
631 meta["copyrev"] = hex(manifest1.get(cp, nullid))
631 meta["copyrev"] = hex(manifest1.get(cp, nullid))
632 self.ui.debug(_(" %s: copy %s:%s\n") %
632 self.ui.debug(_(" %s: copy %s:%s\n") %
633 (fn, cp, meta["copyrev"]))
633 (fn, cp, meta["copyrev"]))
634 fp1 = nullid
634 fp1 = nullid
635 elif fp2 != nullid:
635 elif fp2 != nullid:
636 # is one parent an ancestor of the other?
636 # is one parent an ancestor of the other?
637 fpa = fl.ancestor(fp1, fp2)
637 fpa = fl.ancestor(fp1, fp2)
638 if fpa == fp1:
638 if fpa == fp1:
639 fp1, fp2 = fp2, nullid
639 fp1, fp2 = fp2, nullid
640 elif fpa == fp2:
640 elif fpa == fp2:
641 fp2 = nullid
641 fp2 = nullid
642
642
643 # is the file unmodified from the parent? report existing entry
643 # is the file unmodified from the parent? report existing entry
644 if fp2 == nullid and not fl.cmp(fp1, t):
644 if fp2 == nullid and not fl.cmp(fp1, t):
645 return fp1
645 return fp1
646
646
647 changelist.append(fn)
647 changelist.append(fn)
648 return fl.add(t, meta, tr, linkrev, fp1, fp2)
648 return fl.add(t, meta, tr, linkrev, fp1, fp2)
649
649
650 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
650 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
651 if p1 is None:
651 if p1 is None:
652 p1, p2 = self.dirstate.parents()
652 p1, p2 = self.dirstate.parents()
653 return self.commit(files=files, text=text, user=user, date=date,
653 return self.commit(files=files, text=text, user=user, date=date,
654 p1=p1, p2=p2, extra=extra)
654 p1=p1, p2=p2, extra=extra, empty_ok=True)
655
655
656 def commit(self, files=None, text="", user=None, date=None,
656 def commit(self, files=None, text="", user=None, date=None,
657 match=util.always, force=False, force_editor=False,
657 match=util.always, force=False, force_editor=False,
658 p1=None, p2=None, extra={}):
658 p1=None, p2=None, extra={}, empty_ok=False):
659 wlock = lock = tr = None
659 wlock = lock = tr = None
660 try:
660 try:
661 commit = []
661 commit = []
662 remove = []
662 remove = []
663 changed = []
663 changed = []
664 use_dirstate = (p1 is None) # not rawcommit
664 use_dirstate = (p1 is None) # not rawcommit
665 extra = extra.copy()
665 extra = extra.copy()
666
666
667 if use_dirstate:
667 if use_dirstate:
668 if files:
668 if files:
669 for f in files:
669 for f in files:
670 s = self.dirstate[f]
670 s = self.dirstate[f]
671 if s in 'nma':
671 if s in 'nma':
672 commit.append(f)
672 commit.append(f)
673 elif s == 'r':
673 elif s == 'r':
674 remove.append(f)
674 remove.append(f)
675 else:
675 else:
676 self.ui.warn(_("%s not tracked!\n") % f)
676 self.ui.warn(_("%s not tracked!\n") % f)
677 else:
677 else:
678 changes = self.status(match=match)[:5]
678 changes = self.status(match=match)[:5]
679 modified, added, removed, deleted, unknown = changes
679 modified, added, removed, deleted, unknown = changes
680 commit = modified + added
680 commit = modified + added
681 remove = removed
681 remove = removed
682 else:
682 else:
683 commit = files
683 commit = files
684
684
685 if use_dirstate:
685 if use_dirstate:
686 p1, p2 = self.dirstate.parents()
686 p1, p2 = self.dirstate.parents()
687 update_dirstate = True
687 update_dirstate = True
688 else:
688 else:
689 p1, p2 = p1, p2 or nullid
689 p1, p2 = p1, p2 or nullid
690 update_dirstate = (self.dirstate.parents()[0] == p1)
690 update_dirstate = (self.dirstate.parents()[0] == p1)
691
691
692 c1 = self.changelog.read(p1)
692 c1 = self.changelog.read(p1)
693 c2 = self.changelog.read(p2)
693 c2 = self.changelog.read(p2)
694 m1 = self.manifest.read(c1[0]).copy()
694 m1 = self.manifest.read(c1[0]).copy()
695 m2 = self.manifest.read(c2[0])
695 m2 = self.manifest.read(c2[0])
696
696
697 if use_dirstate:
697 if use_dirstate:
698 branchname = self.workingctx().branch()
698 branchname = self.workingctx().branch()
699 try:
699 try:
700 branchname = branchname.decode('UTF-8').encode('UTF-8')
700 branchname = branchname.decode('UTF-8').encode('UTF-8')
701 except UnicodeDecodeError:
701 except UnicodeDecodeError:
702 raise util.Abort(_('branch name not in UTF-8!'))
702 raise util.Abort(_('branch name not in UTF-8!'))
703 else:
703 else:
704 branchname = ""
704 branchname = ""
705
705
706 if use_dirstate:
706 if use_dirstate:
707 oldname = c1[5].get("branch") # stored in UTF-8
707 oldname = c1[5].get("branch") # stored in UTF-8
708 if (not commit and not remove and not force and p2 == nullid
708 if (not commit and not remove and not force and p2 == nullid
709 and branchname == oldname):
709 and branchname == oldname):
710 self.ui.status(_("nothing changed\n"))
710 self.ui.status(_("nothing changed\n"))
711 return None
711 return None
712
712
713 xp1 = hex(p1)
713 xp1 = hex(p1)
714 if p2 == nullid: xp2 = ''
714 if p2 == nullid: xp2 = ''
715 else: xp2 = hex(p2)
715 else: xp2 = hex(p2)
716
716
717 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
717 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
718
718
719 wlock = self.wlock()
719 wlock = self.wlock()
720 lock = self.lock()
720 lock = self.lock()
721 tr = self.transaction()
721 tr = self.transaction()
722 trp = weakref.proxy(tr)
722 trp = weakref.proxy(tr)
723
723
724 # check in files
724 # check in files
725 new = {}
725 new = {}
726 linkrev = self.changelog.count()
726 linkrev = self.changelog.count()
727 commit.sort()
727 commit.sort()
728 is_exec = util.execfunc(self.root, m1.execf)
728 is_exec = util.execfunc(self.root, m1.execf)
729 is_link = util.linkfunc(self.root, m1.linkf)
729 is_link = util.linkfunc(self.root, m1.linkf)
730 for f in commit:
730 for f in commit:
731 self.ui.note(f + "\n")
731 self.ui.note(f + "\n")
732 try:
732 try:
733 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
733 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
734 new_exec = is_exec(f)
734 new_exec = is_exec(f)
735 new_link = is_link(f)
735 new_link = is_link(f)
736 if not changed or changed[-1] != f:
736 if not changed or changed[-1] != f:
737 # mention the file in the changelog if some
737 # mention the file in the changelog if some
738 # flag changed, even if there was no content
738 # flag changed, even if there was no content
739 # change.
739 # change.
740 old_exec = m1.execf(f)
740 old_exec = m1.execf(f)
741 old_link = m1.linkf(f)
741 old_link = m1.linkf(f)
742 if old_exec != new_exec or old_link != new_link:
742 if old_exec != new_exec or old_link != new_link:
743 changed.append(f)
743 changed.append(f)
744 m1.set(f, new_exec, new_link)
744 m1.set(f, new_exec, new_link)
745 except (OSError, IOError):
745 except (OSError, IOError):
746 if use_dirstate:
746 if use_dirstate:
747 self.ui.warn(_("trouble committing %s!\n") % f)
747 self.ui.warn(_("trouble committing %s!\n") % f)
748 raise
748 raise
749 else:
749 else:
750 remove.append(f)
750 remove.append(f)
751
751
752 # update manifest
752 # update manifest
753 m1.update(new)
753 m1.update(new)
754 remove.sort()
754 remove.sort()
755 removed = []
755 removed = []
756
756
757 for f in remove:
757 for f in remove:
758 if f in m1:
758 if f in m1:
759 del m1[f]
759 del m1[f]
760 removed.append(f)
760 removed.append(f)
761 elif f in m2:
761 elif f in m2:
762 removed.append(f)
762 removed.append(f)
763 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
763 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
764 (new, removed))
764 (new, removed))
765
765
766 # add changeset
766 # add changeset
767 new = new.keys()
767 new = new.keys()
768 new.sort()
768 new.sort()
769
769
770 user = user or self.ui.username()
770 user = user or self.ui.username()
771 if not text or force_editor:
771 if (not empty_ok and not text) or force_editor:
772 edittext = []
772 edittext = []
773 if text:
773 if text:
774 edittext.append(text)
774 edittext.append(text)
775 edittext.append("")
775 edittext.append("")
776 edittext.append("HG: user: %s" % user)
776 edittext.append("HG: user: %s" % user)
777 if p2 != nullid:
777 if p2 != nullid:
778 edittext.append("HG: branch merge")
778 edittext.append("HG: branch merge")
779 if branchname:
779 if branchname:
780 edittext.append("HG: branch %s" % util.tolocal(branchname))
780 edittext.append("HG: branch %s" % util.tolocal(branchname))
781 edittext.extend(["HG: changed %s" % f for f in changed])
781 edittext.extend(["HG: changed %s" % f for f in changed])
782 edittext.extend(["HG: removed %s" % f for f in removed])
782 edittext.extend(["HG: removed %s" % f for f in removed])
783 if not changed and not remove:
783 if not changed and not remove:
784 edittext.append("HG: no files changed")
784 edittext.append("HG: no files changed")
785 edittext.append("")
785 edittext.append("")
786 # run editor in the repository root
786 # run editor in the repository root
787 olddir = os.getcwd()
787 olddir = os.getcwd()
788 os.chdir(self.root)
788 os.chdir(self.root)
789 text = self.ui.edit("\n".join(edittext), user)
789 text = self.ui.edit("\n".join(edittext), user)
790 os.chdir(olddir)
790 os.chdir(olddir)
791
791
792 if branchname:
792 if branchname:
793 extra["branch"] = branchname
793 extra["branch"] = branchname
794
794
795 if use_dirstate:
795 if use_dirstate:
796 lines = [line.rstrip() for line in text.rstrip().splitlines()]
796 lines = [line.rstrip() for line in text.rstrip().splitlines()]
797 while lines and not lines[0]:
797 while lines and not lines[0]:
798 del lines[0]
798 del lines[0]
799 if not lines:
799 if not lines:
800 return None
800 return None
801 text = '\n'.join(lines)
801 text = '\n'.join(lines)
802
802
803 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
803 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
804 user, date, extra)
804 user, date, extra)
805 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
805 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
806 parent2=xp2)
806 parent2=xp2)
807 tr.close()
807 tr.close()
808
808
809 if self.branchcache and "branch" in extra:
809 if self.branchcache and "branch" in extra:
810 self.branchcache[util.tolocal(extra["branch"])] = n
810 self.branchcache[util.tolocal(extra["branch"])] = n
811
811
812 if use_dirstate or update_dirstate:
812 if use_dirstate or update_dirstate:
813 self.dirstate.setparents(n)
813 self.dirstate.setparents(n)
814 if use_dirstate:
814 if use_dirstate:
815 for f in new:
815 for f in new:
816 self.dirstate.normal(f)
816 self.dirstate.normal(f)
817 for f in removed:
817 for f in removed:
818 self.dirstate.forget(f)
818 self.dirstate.forget(f)
819
819
820 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
820 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
821 return n
821 return n
822 finally:
822 finally:
823 del lock, wlock, tr
823 del lock, wlock, tr
824
824
825 def walk(self, node=None, files=[], match=util.always, badmatch=None):
825 def walk(self, node=None, files=[], match=util.always, badmatch=None):
826 '''
826 '''
827 walk recursively through the directory tree or a given
827 walk recursively through the directory tree or a given
828 changeset, finding all files matched by the match
828 changeset, finding all files matched by the match
829 function
829 function
830
830
831 results are yielded in a tuple (src, filename), where src
831 results are yielded in a tuple (src, filename), where src
832 is one of:
832 is one of:
833 'f' the file was found in the directory tree
833 'f' the file was found in the directory tree
834 'm' the file was only in the dirstate and not in the tree
834 'm' the file was only in the dirstate and not in the tree
835 'b' file was not found and matched badmatch
835 'b' file was not found and matched badmatch
836 '''
836 '''
837
837
838 if node:
838 if node:
839 fdict = dict.fromkeys(files)
839 fdict = dict.fromkeys(files)
840 # for dirstate.walk, files=['.'] means "walk the whole tree".
840 # for dirstate.walk, files=['.'] means "walk the whole tree".
841 # follow that here, too
841 # follow that here, too
842 fdict.pop('.', None)
842 fdict.pop('.', None)
843 mdict = self.manifest.read(self.changelog.read(node)[0])
843 mdict = self.manifest.read(self.changelog.read(node)[0])
844 mfiles = mdict.keys()
844 mfiles = mdict.keys()
845 mfiles.sort()
845 mfiles.sort()
846 for fn in mfiles:
846 for fn in mfiles:
847 for ffn in fdict:
847 for ffn in fdict:
848 # match if the file is the exact name or a directory
848 # match if the file is the exact name or a directory
849 if ffn == fn or fn.startswith("%s/" % ffn):
849 if ffn == fn or fn.startswith("%s/" % ffn):
850 del fdict[ffn]
850 del fdict[ffn]
851 break
851 break
852 if match(fn):
852 if match(fn):
853 yield 'm', fn
853 yield 'm', fn
854 ffiles = fdict.keys()
854 ffiles = fdict.keys()
855 ffiles.sort()
855 ffiles.sort()
856 for fn in ffiles:
856 for fn in ffiles:
857 if badmatch and badmatch(fn):
857 if badmatch and badmatch(fn):
858 if match(fn):
858 if match(fn):
859 yield 'b', fn
859 yield 'b', fn
860 else:
860 else:
861 self.ui.warn(_('%s: No such file in rev %s\n')
861 self.ui.warn(_('%s: No such file in rev %s\n')
862 % (self.pathto(fn), short(node)))
862 % (self.pathto(fn), short(node)))
863 else:
863 else:
864 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
864 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
865 yield src, fn
865 yield src, fn
866
866
867 def status(self, node1=None, node2=None, files=[], match=util.always,
867 def status(self, node1=None, node2=None, files=[], match=util.always,
868 list_ignored=False, list_clean=False):
868 list_ignored=False, list_clean=False):
869 """return status of files between two nodes or node and working directory
869 """return status of files between two nodes or node and working directory
870
870
871 If node1 is None, use the first dirstate parent instead.
871 If node1 is None, use the first dirstate parent instead.
872 If node2 is None, compare node1 with working directory.
872 If node2 is None, compare node1 with working directory.
873 """
873 """
874
874
875 def fcmp(fn, getnode):
875 def fcmp(fn, getnode):
876 t1 = self.wread(fn)
876 t1 = self.wread(fn)
877 return self.file(fn).cmp(getnode(fn), t1)
877 return self.file(fn).cmp(getnode(fn), t1)
878
878
879 def mfmatches(node):
879 def mfmatches(node):
880 change = self.changelog.read(node)
880 change = self.changelog.read(node)
881 mf = self.manifest.read(change[0]).copy()
881 mf = self.manifest.read(change[0]).copy()
882 for fn in mf.keys():
882 for fn in mf.keys():
883 if not match(fn):
883 if not match(fn):
884 del mf[fn]
884 del mf[fn]
885 return mf
885 return mf
886
886
887 modified, added, removed, deleted, unknown = [], [], [], [], []
887 modified, added, removed, deleted, unknown = [], [], [], [], []
888 ignored, clean = [], []
888 ignored, clean = [], []
889
889
890 compareworking = False
890 compareworking = False
891 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
891 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
892 compareworking = True
892 compareworking = True
893
893
894 if not compareworking:
894 if not compareworking:
895 # read the manifest from node1 before the manifest from node2,
895 # read the manifest from node1 before the manifest from node2,
896 # so that we'll hit the manifest cache if we're going through
896 # so that we'll hit the manifest cache if we're going through
897 # all the revisions in parent->child order.
897 # all the revisions in parent->child order.
898 mf1 = mfmatches(node1)
898 mf1 = mfmatches(node1)
899
899
900 # are we comparing the working directory?
900 # are we comparing the working directory?
901 if not node2:
901 if not node2:
902 (lookup, modified, added, removed, deleted, unknown,
902 (lookup, modified, added, removed, deleted, unknown,
903 ignored, clean) = self.dirstate.status(files, match,
903 ignored, clean) = self.dirstate.status(files, match,
904 list_ignored, list_clean)
904 list_ignored, list_clean)
905
905
906 # are we comparing working dir against its parent?
906 # are we comparing working dir against its parent?
907 if compareworking:
907 if compareworking:
908 if lookup:
908 if lookup:
909 fixup = []
909 fixup = []
910 # do a full compare of any files that might have changed
910 # do a full compare of any files that might have changed
911 ctx = self.changectx()
911 ctx = self.changectx()
912 for f in lookup:
912 for f in lookup:
913 if f not in ctx or ctx[f].cmp(self.wread(f)):
913 if f not in ctx or ctx[f].cmp(self.wread(f)):
914 modified.append(f)
914 modified.append(f)
915 else:
915 else:
916 fixup.append(f)
916 fixup.append(f)
917 if list_clean:
917 if list_clean:
918 clean.append(f)
918 clean.append(f)
919
919
920 # update dirstate for files that are actually clean
920 # update dirstate for files that are actually clean
921 if fixup:
921 if fixup:
922 wlock = None
922 wlock = None
923 try:
923 try:
924 try:
924 try:
925 wlock = self.wlock(False)
925 wlock = self.wlock(False)
926 except lock.LockException:
926 except lock.LockException:
927 pass
927 pass
928 if wlock:
928 if wlock:
929 for f in fixup:
929 for f in fixup:
930 self.dirstate.normal(f)
930 self.dirstate.normal(f)
931 finally:
931 finally:
932 del wlock
932 del wlock
933 else:
933 else:
934 # we are comparing working dir against non-parent
934 # we are comparing working dir against non-parent
935 # generate a pseudo-manifest for the working dir
935 # generate a pseudo-manifest for the working dir
936 # XXX: create it in dirstate.py ?
936 # XXX: create it in dirstate.py ?
937 mf2 = mfmatches(self.dirstate.parents()[0])
937 mf2 = mfmatches(self.dirstate.parents()[0])
938 is_exec = util.execfunc(self.root, mf2.execf)
938 is_exec = util.execfunc(self.root, mf2.execf)
939 is_link = util.linkfunc(self.root, mf2.linkf)
939 is_link = util.linkfunc(self.root, mf2.linkf)
940 for f in lookup + modified + added:
940 for f in lookup + modified + added:
941 mf2[f] = ""
941 mf2[f] = ""
942 mf2.set(f, is_exec(f), is_link(f))
942 mf2.set(f, is_exec(f), is_link(f))
943 for f in removed:
943 for f in removed:
944 if f in mf2:
944 if f in mf2:
945 del mf2[f]
945 del mf2[f]
946
946
947 else:
947 else:
948 # we are comparing two revisions
948 # we are comparing two revisions
949 mf2 = mfmatches(node2)
949 mf2 = mfmatches(node2)
950
950
951 if not compareworking:
951 if not compareworking:
952 # flush lists from dirstate before comparing manifests
952 # flush lists from dirstate before comparing manifests
953 modified, added, clean = [], [], []
953 modified, added, clean = [], [], []
954
954
955 # make sure to sort the files so we talk to the disk in a
955 # make sure to sort the files so we talk to the disk in a
956 # reasonable order
956 # reasonable order
957 mf2keys = mf2.keys()
957 mf2keys = mf2.keys()
958 mf2keys.sort()
958 mf2keys.sort()
959 getnode = lambda fn: mf1.get(fn, nullid)
959 getnode = lambda fn: mf1.get(fn, nullid)
960 for fn in mf2keys:
960 for fn in mf2keys:
961 if mf1.has_key(fn):
961 if mf1.has_key(fn):
962 if (mf1.flags(fn) != mf2.flags(fn) or
962 if (mf1.flags(fn) != mf2.flags(fn) or
963 (mf1[fn] != mf2[fn] and
963 (mf1[fn] != mf2[fn] and
964 (mf2[fn] != "" or fcmp(fn, getnode)))):
964 (mf2[fn] != "" or fcmp(fn, getnode)))):
965 modified.append(fn)
965 modified.append(fn)
966 elif list_clean:
966 elif list_clean:
967 clean.append(fn)
967 clean.append(fn)
968 del mf1[fn]
968 del mf1[fn]
969 else:
969 else:
970 added.append(fn)
970 added.append(fn)
971
971
972 removed = mf1.keys()
972 removed = mf1.keys()
973
973
974 # sort and return results:
974 # sort and return results:
975 for l in modified, added, removed, deleted, unknown, ignored, clean:
975 for l in modified, added, removed, deleted, unknown, ignored, clean:
976 l.sort()
976 l.sort()
977 return (modified, added, removed, deleted, unknown, ignored, clean)
977 return (modified, added, removed, deleted, unknown, ignored, clean)
978
978
979 def add(self, list):
979 def add(self, list):
980 wlock = self.wlock()
980 wlock = self.wlock()
981 try:
981 try:
982 for f in list:
982 for f in list:
983 p = self.wjoin(f)
983 p = self.wjoin(f)
984 try:
984 try:
985 st = os.lstat(p)
985 st = os.lstat(p)
986 except:
986 except:
987 self.ui.warn(_("%s does not exist!\n") % f)
987 self.ui.warn(_("%s does not exist!\n") % f)
988 continue
988 continue
989 if st.st_size > 10000000:
989 if st.st_size > 10000000:
990 self.ui.warn(_("%s: files over 10MB may cause memory and"
990 self.ui.warn(_("%s: files over 10MB may cause memory and"
991 " performance problems\n"
991 " performance problems\n"
992 "(use 'hg revert %s' to unadd the file)\n")
992 "(use 'hg revert %s' to unadd the file)\n")
993 % (f, f))
993 % (f, f))
994 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
994 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
995 self.ui.warn(_("%s not added: only files and symlinks "
995 self.ui.warn(_("%s not added: only files and symlinks "
996 "supported currently\n") % f)
996 "supported currently\n") % f)
997 elif self.dirstate[f] in 'an':
997 elif self.dirstate[f] in 'an':
998 self.ui.warn(_("%s already tracked!\n") % f)
998 self.ui.warn(_("%s already tracked!\n") % f)
999 else:
999 else:
1000 self.dirstate.add(f)
1000 self.dirstate.add(f)
1001 finally:
1001 finally:
1002 del wlock
1002 del wlock
1003
1003
1004 def forget(self, list):
1004 def forget(self, list):
1005 wlock = self.wlock()
1005 wlock = self.wlock()
1006 try:
1006 try:
1007 for f in list:
1007 for f in list:
1008 if self.dirstate[f] != 'a':
1008 if self.dirstate[f] != 'a':
1009 self.ui.warn(_("%s not added!\n") % f)
1009 self.ui.warn(_("%s not added!\n") % f)
1010 else:
1010 else:
1011 self.dirstate.forget(f)
1011 self.dirstate.forget(f)
1012 finally:
1012 finally:
1013 del wlock
1013 del wlock
1014
1014
1015 def remove(self, list, unlink=False):
1015 def remove(self, list, unlink=False):
1016 wlock = None
1016 wlock = None
1017 try:
1017 try:
1018 if unlink:
1018 if unlink:
1019 for f in list:
1019 for f in list:
1020 try:
1020 try:
1021 util.unlink(self.wjoin(f))
1021 util.unlink(self.wjoin(f))
1022 except OSError, inst:
1022 except OSError, inst:
1023 if inst.errno != errno.ENOENT:
1023 if inst.errno != errno.ENOENT:
1024 raise
1024 raise
1025 wlock = self.wlock()
1025 wlock = self.wlock()
1026 for f in list:
1026 for f in list:
1027 if unlink and os.path.exists(self.wjoin(f)):
1027 if unlink and os.path.exists(self.wjoin(f)):
1028 self.ui.warn(_("%s still exists!\n") % f)
1028 self.ui.warn(_("%s still exists!\n") % f)
1029 elif self.dirstate[f] == 'a':
1029 elif self.dirstate[f] == 'a':
1030 self.dirstate.forget(f)
1030 self.dirstate.forget(f)
1031 elif f not in self.dirstate:
1031 elif f not in self.dirstate:
1032 self.ui.warn(_("%s not tracked!\n") % f)
1032 self.ui.warn(_("%s not tracked!\n") % f)
1033 else:
1033 else:
1034 self.dirstate.remove(f)
1034 self.dirstate.remove(f)
1035 finally:
1035 finally:
1036 del wlock
1036 del wlock
1037
1037
1038 def undelete(self, list):
1038 def undelete(self, list):
1039 wlock = None
1039 wlock = None
1040 try:
1040 try:
1041 p = self.dirstate.parents()[0]
1041 p = self.dirstate.parents()[0]
1042 mn = self.changelog.read(p)[0]
1042 mn = self.changelog.read(p)[0]
1043 m = self.manifest.read(mn)
1043 m = self.manifest.read(mn)
1044 wlock = self.wlock()
1044 wlock = self.wlock()
1045 for f in list:
1045 for f in list:
1046 if self.dirstate[f] != 'r':
1046 if self.dirstate[f] != 'r':
1047 self.ui.warn("%s not removed!\n" % f)
1047 self.ui.warn("%s not removed!\n" % f)
1048 else:
1048 else:
1049 t = self.file(f).read(m[f])
1049 t = self.file(f).read(m[f])
1050 self.wwrite(f, t, m.flags(f))
1050 self.wwrite(f, t, m.flags(f))
1051 self.dirstate.normal(f)
1051 self.dirstate.normal(f)
1052 finally:
1052 finally:
1053 del wlock
1053 del wlock
1054
1054
1055 def copy(self, source, dest):
1055 def copy(self, source, dest):
1056 wlock = None
1056 wlock = None
1057 try:
1057 try:
1058 p = self.wjoin(dest)
1058 p = self.wjoin(dest)
1059 if not (os.path.exists(p) or os.path.islink(p)):
1059 if not (os.path.exists(p) or os.path.islink(p)):
1060 self.ui.warn(_("%s does not exist!\n") % dest)
1060 self.ui.warn(_("%s does not exist!\n") % dest)
1061 elif not (os.path.isfile(p) or os.path.islink(p)):
1061 elif not (os.path.isfile(p) or os.path.islink(p)):
1062 self.ui.warn(_("copy failed: %s is not a file or a "
1062 self.ui.warn(_("copy failed: %s is not a file or a "
1063 "symbolic link\n") % dest)
1063 "symbolic link\n") % dest)
1064 else:
1064 else:
1065 wlock = self.wlock()
1065 wlock = self.wlock()
1066 if dest not in self.dirstate:
1066 if dest not in self.dirstate:
1067 self.dirstate.add(dest)
1067 self.dirstate.add(dest)
1068 self.dirstate.copy(source, dest)
1068 self.dirstate.copy(source, dest)
1069 finally:
1069 finally:
1070 del wlock
1070 del wlock
1071
1071
1072 def heads(self, start=None):
1072 def heads(self, start=None):
1073 heads = self.changelog.heads(start)
1073 heads = self.changelog.heads(start)
1074 # sort the output in rev descending order
1074 # sort the output in rev descending order
1075 heads = [(-self.changelog.rev(h), h) for h in heads]
1075 heads = [(-self.changelog.rev(h), h) for h in heads]
1076 heads.sort()
1076 heads.sort()
1077 return [n for (r, n) in heads]
1077 return [n for (r, n) in heads]
1078
1078
1079 def branchheads(self, branch, start=None):
1079 def branchheads(self, branch, start=None):
1080 branches = self.branchtags()
1080 branches = self.branchtags()
1081 if branch not in branches:
1081 if branch not in branches:
1082 return []
1082 return []
1083 # The basic algorithm is this:
1083 # The basic algorithm is this:
1084 #
1084 #
1085 # Start from the branch tip since there are no later revisions that can
1085 # Start from the branch tip since there are no later revisions that can
1086 # possibly be in this branch, and the tip is a guaranteed head.
1086 # possibly be in this branch, and the tip is a guaranteed head.
1087 #
1087 #
1088 # Remember the tip's parents as the first ancestors, since these by
1088 # Remember the tip's parents as the first ancestors, since these by
1089 # definition are not heads.
1089 # definition are not heads.
1090 #
1090 #
1091 # Step backwards from the brach tip through all the revisions. We are
1091 # Step backwards from the brach tip through all the revisions. We are
1092 # guaranteed by the rules of Mercurial that we will now be visiting the
1092 # guaranteed by the rules of Mercurial that we will now be visiting the
1093 # nodes in reverse topological order (children before parents).
1093 # nodes in reverse topological order (children before parents).
1094 #
1094 #
1095 # If a revision is one of the ancestors of a head then we can toss it
1095 # If a revision is one of the ancestors of a head then we can toss it
1096 # out of the ancestors set (we've already found it and won't be
1096 # out of the ancestors set (we've already found it and won't be
1097 # visiting it again) and put its parents in the ancestors set.
1097 # visiting it again) and put its parents in the ancestors set.
1098 #
1098 #
1099 # Otherwise, if a revision is in the branch it's another head, since it
1099 # Otherwise, if a revision is in the branch it's another head, since it
1100 # wasn't in the ancestor list of an existing head. So add it to the
1100 # wasn't in the ancestor list of an existing head. So add it to the
1101 # head list, and add its parents to the ancestor list.
1101 # head list, and add its parents to the ancestor list.
1102 #
1102 #
1103 # If it is not in the branch ignore it.
1103 # If it is not in the branch ignore it.
1104 #
1104 #
1105 # Once we have a list of heads, use nodesbetween to filter out all the
1105 # Once we have a list of heads, use nodesbetween to filter out all the
1106 # heads that cannot be reached from startrev. There may be a more
1106 # heads that cannot be reached from startrev. There may be a more
1107 # efficient way to do this as part of the previous algorithm.
1107 # efficient way to do this as part of the previous algorithm.
1108
1108
1109 set = util.set
1109 set = util.set
1110 heads = [self.changelog.rev(branches[branch])]
1110 heads = [self.changelog.rev(branches[branch])]
1111 # Don't care if ancestors contains nullrev or not.
1111 # Don't care if ancestors contains nullrev or not.
1112 ancestors = set(self.changelog.parentrevs(heads[0]))
1112 ancestors = set(self.changelog.parentrevs(heads[0]))
1113 for rev in xrange(heads[0] - 1, nullrev, -1):
1113 for rev in xrange(heads[0] - 1, nullrev, -1):
1114 if rev in ancestors:
1114 if rev in ancestors:
1115 ancestors.update(self.changelog.parentrevs(rev))
1115 ancestors.update(self.changelog.parentrevs(rev))
1116 ancestors.remove(rev)
1116 ancestors.remove(rev)
1117 elif self.changectx(rev).branch() == branch:
1117 elif self.changectx(rev).branch() == branch:
1118 heads.append(rev)
1118 heads.append(rev)
1119 ancestors.update(self.changelog.parentrevs(rev))
1119 ancestors.update(self.changelog.parentrevs(rev))
1120 heads = [self.changelog.node(rev) for rev in heads]
1120 heads = [self.changelog.node(rev) for rev in heads]
1121 if start is not None:
1121 if start is not None:
1122 heads = self.changelog.nodesbetween([start], heads)[2]
1122 heads = self.changelog.nodesbetween([start], heads)[2]
1123 return heads
1123 return heads
1124
1124
1125 def branches(self, nodes):
1125 def branches(self, nodes):
1126 if not nodes:
1126 if not nodes:
1127 nodes = [self.changelog.tip()]
1127 nodes = [self.changelog.tip()]
1128 b = []
1128 b = []
1129 for n in nodes:
1129 for n in nodes:
1130 t = n
1130 t = n
1131 while 1:
1131 while 1:
1132 p = self.changelog.parents(n)
1132 p = self.changelog.parents(n)
1133 if p[1] != nullid or p[0] == nullid:
1133 if p[1] != nullid or p[0] == nullid:
1134 b.append((t, n, p[0], p[1]))
1134 b.append((t, n, p[0], p[1]))
1135 break
1135 break
1136 n = p[0]
1136 n = p[0]
1137 return b
1137 return b
1138
1138
1139 def between(self, pairs):
1139 def between(self, pairs):
1140 r = []
1140 r = []
1141
1141
1142 for top, bottom in pairs:
1142 for top, bottom in pairs:
1143 n, l, i = top, [], 0
1143 n, l, i = top, [], 0
1144 f = 1
1144 f = 1
1145
1145
1146 while n != bottom:
1146 while n != bottom:
1147 p = self.changelog.parents(n)[0]
1147 p = self.changelog.parents(n)[0]
1148 if i == f:
1148 if i == f:
1149 l.append(n)
1149 l.append(n)
1150 f = f * 2
1150 f = f * 2
1151 n = p
1151 n = p
1152 i += 1
1152 i += 1
1153
1153
1154 r.append(l)
1154 r.append(l)
1155
1155
1156 return r
1156 return r
1157
1157
1158 def findincoming(self, remote, base=None, heads=None, force=False):
1158 def findincoming(self, remote, base=None, heads=None, force=False):
1159 """Return list of roots of the subsets of missing nodes from remote
1159 """Return list of roots of the subsets of missing nodes from remote
1160
1160
1161 If base dict is specified, assume that these nodes and their parents
1161 If base dict is specified, assume that these nodes and their parents
1162 exist on the remote side and that no child of a node of base exists
1162 exist on the remote side and that no child of a node of base exists
1163 in both remote and self.
1163 in both remote and self.
1164 Furthermore base will be updated to include the nodes that exists
1164 Furthermore base will be updated to include the nodes that exists
1165 in self and remote but no children exists in self and remote.
1165 in self and remote but no children exists in self and remote.
1166 If a list of heads is specified, return only nodes which are heads
1166 If a list of heads is specified, return only nodes which are heads
1167 or ancestors of these heads.
1167 or ancestors of these heads.
1168
1168
1169 All the ancestors of base are in self and in remote.
1169 All the ancestors of base are in self and in remote.
1170 All the descendants of the list returned are missing in self.
1170 All the descendants of the list returned are missing in self.
1171 (and so we know that the rest of the nodes are missing in remote, see
1171 (and so we know that the rest of the nodes are missing in remote, see
1172 outgoing)
1172 outgoing)
1173 """
1173 """
1174 m = self.changelog.nodemap
1174 m = self.changelog.nodemap
1175 search = []
1175 search = []
1176 fetch = {}
1176 fetch = {}
1177 seen = {}
1177 seen = {}
1178 seenbranch = {}
1178 seenbranch = {}
1179 if base == None:
1179 if base == None:
1180 base = {}
1180 base = {}
1181
1181
1182 if not heads:
1182 if not heads:
1183 heads = remote.heads()
1183 heads = remote.heads()
1184
1184
1185 if self.changelog.tip() == nullid:
1185 if self.changelog.tip() == nullid:
1186 base[nullid] = 1
1186 base[nullid] = 1
1187 if heads != [nullid]:
1187 if heads != [nullid]:
1188 return [nullid]
1188 return [nullid]
1189 return []
1189 return []
1190
1190
1191 # assume we're closer to the tip than the root
1191 # assume we're closer to the tip than the root
1192 # and start by examining the heads
1192 # and start by examining the heads
1193 self.ui.status(_("searching for changes\n"))
1193 self.ui.status(_("searching for changes\n"))
1194
1194
1195 unknown = []
1195 unknown = []
1196 for h in heads:
1196 for h in heads:
1197 if h not in m:
1197 if h not in m:
1198 unknown.append(h)
1198 unknown.append(h)
1199 else:
1199 else:
1200 base[h] = 1
1200 base[h] = 1
1201
1201
1202 if not unknown:
1202 if not unknown:
1203 return []
1203 return []
1204
1204
1205 req = dict.fromkeys(unknown)
1205 req = dict.fromkeys(unknown)
1206 reqcnt = 0
1206 reqcnt = 0
1207
1207
1208 # search through remote branches
1208 # search through remote branches
1209 # a 'branch' here is a linear segment of history, with four parts:
1209 # a 'branch' here is a linear segment of history, with four parts:
1210 # head, root, first parent, second parent
1210 # head, root, first parent, second parent
1211 # (a branch always has two parents (or none) by definition)
1211 # (a branch always has two parents (or none) by definition)
1212 unknown = remote.branches(unknown)
1212 unknown = remote.branches(unknown)
1213 while unknown:
1213 while unknown:
1214 r = []
1214 r = []
1215 while unknown:
1215 while unknown:
1216 n = unknown.pop(0)
1216 n = unknown.pop(0)
1217 if n[0] in seen:
1217 if n[0] in seen:
1218 continue
1218 continue
1219
1219
1220 self.ui.debug(_("examining %s:%s\n")
1220 self.ui.debug(_("examining %s:%s\n")
1221 % (short(n[0]), short(n[1])))
1221 % (short(n[0]), short(n[1])))
1222 if n[0] == nullid: # found the end of the branch
1222 if n[0] == nullid: # found the end of the branch
1223 pass
1223 pass
1224 elif n in seenbranch:
1224 elif n in seenbranch:
1225 self.ui.debug(_("branch already found\n"))
1225 self.ui.debug(_("branch already found\n"))
1226 continue
1226 continue
1227 elif n[1] and n[1] in m: # do we know the base?
1227 elif n[1] and n[1] in m: # do we know the base?
1228 self.ui.debug(_("found incomplete branch %s:%s\n")
1228 self.ui.debug(_("found incomplete branch %s:%s\n")
1229 % (short(n[0]), short(n[1])))
1229 % (short(n[0]), short(n[1])))
1230 search.append(n) # schedule branch range for scanning
1230 search.append(n) # schedule branch range for scanning
1231 seenbranch[n] = 1
1231 seenbranch[n] = 1
1232 else:
1232 else:
1233 if n[1] not in seen and n[1] not in fetch:
1233 if n[1] not in seen and n[1] not in fetch:
1234 if n[2] in m and n[3] in m:
1234 if n[2] in m and n[3] in m:
1235 self.ui.debug(_("found new changeset %s\n") %
1235 self.ui.debug(_("found new changeset %s\n") %
1236 short(n[1]))
1236 short(n[1]))
1237 fetch[n[1]] = 1 # earliest unknown
1237 fetch[n[1]] = 1 # earliest unknown
1238 for p in n[2:4]:
1238 for p in n[2:4]:
1239 if p in m:
1239 if p in m:
1240 base[p] = 1 # latest known
1240 base[p] = 1 # latest known
1241
1241
1242 for p in n[2:4]:
1242 for p in n[2:4]:
1243 if p not in req and p not in m:
1243 if p not in req and p not in m:
1244 r.append(p)
1244 r.append(p)
1245 req[p] = 1
1245 req[p] = 1
1246 seen[n[0]] = 1
1246 seen[n[0]] = 1
1247
1247
1248 if r:
1248 if r:
1249 reqcnt += 1
1249 reqcnt += 1
1250 self.ui.debug(_("request %d: %s\n") %
1250 self.ui.debug(_("request %d: %s\n") %
1251 (reqcnt, " ".join(map(short, r))))
1251 (reqcnt, " ".join(map(short, r))))
1252 for p in xrange(0, len(r), 10):
1252 for p in xrange(0, len(r), 10):
1253 for b in remote.branches(r[p:p+10]):
1253 for b in remote.branches(r[p:p+10]):
1254 self.ui.debug(_("received %s:%s\n") %
1254 self.ui.debug(_("received %s:%s\n") %
1255 (short(b[0]), short(b[1])))
1255 (short(b[0]), short(b[1])))
1256 unknown.append(b)
1256 unknown.append(b)
1257
1257
1258 # do binary search on the branches we found
1258 # do binary search on the branches we found
1259 while search:
1259 while search:
1260 n = search.pop(0)
1260 n = search.pop(0)
1261 reqcnt += 1
1261 reqcnt += 1
1262 l = remote.between([(n[0], n[1])])[0]
1262 l = remote.between([(n[0], n[1])])[0]
1263 l.append(n[1])
1263 l.append(n[1])
1264 p = n[0]
1264 p = n[0]
1265 f = 1
1265 f = 1
1266 for i in l:
1266 for i in l:
1267 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1267 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1268 if i in m:
1268 if i in m:
1269 if f <= 2:
1269 if f <= 2:
1270 self.ui.debug(_("found new branch changeset %s\n") %
1270 self.ui.debug(_("found new branch changeset %s\n") %
1271 short(p))
1271 short(p))
1272 fetch[p] = 1
1272 fetch[p] = 1
1273 base[i] = 1
1273 base[i] = 1
1274 else:
1274 else:
1275 self.ui.debug(_("narrowed branch search to %s:%s\n")
1275 self.ui.debug(_("narrowed branch search to %s:%s\n")
1276 % (short(p), short(i)))
1276 % (short(p), short(i)))
1277 search.append((p, i))
1277 search.append((p, i))
1278 break
1278 break
1279 p, f = i, f * 2
1279 p, f = i, f * 2
1280
1280
1281 # sanity check our fetch list
1281 # sanity check our fetch list
1282 for f in fetch.keys():
1282 for f in fetch.keys():
1283 if f in m:
1283 if f in m:
1284 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1284 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1285
1285
1286 if base.keys() == [nullid]:
1286 if base.keys() == [nullid]:
1287 if force:
1287 if force:
1288 self.ui.warn(_("warning: repository is unrelated\n"))
1288 self.ui.warn(_("warning: repository is unrelated\n"))
1289 else:
1289 else:
1290 raise util.Abort(_("repository is unrelated"))
1290 raise util.Abort(_("repository is unrelated"))
1291
1291
1292 self.ui.debug(_("found new changesets starting at ") +
1292 self.ui.debug(_("found new changesets starting at ") +
1293 " ".join([short(f) for f in fetch]) + "\n")
1293 " ".join([short(f) for f in fetch]) + "\n")
1294
1294
1295 self.ui.debug(_("%d total queries\n") % reqcnt)
1295 self.ui.debug(_("%d total queries\n") % reqcnt)
1296
1296
1297 return fetch.keys()
1297 return fetch.keys()
1298
1298
1299 def findoutgoing(self, remote, base=None, heads=None, force=False):
1299 def findoutgoing(self, remote, base=None, heads=None, force=False):
1300 """Return list of nodes that are roots of subsets not in remote
1300 """Return list of nodes that are roots of subsets not in remote
1301
1301
1302 If base dict is specified, assume that these nodes and their parents
1302 If base dict is specified, assume that these nodes and their parents
1303 exist on the remote side.
1303 exist on the remote side.
1304 If a list of heads is specified, return only nodes which are heads
1304 If a list of heads is specified, return only nodes which are heads
1305 or ancestors of these heads, and return a second element which
1305 or ancestors of these heads, and return a second element which
1306 contains all remote heads which get new children.
1306 contains all remote heads which get new children.
1307 """
1307 """
1308 if base == None:
1308 if base == None:
1309 base = {}
1309 base = {}
1310 self.findincoming(remote, base, heads, force=force)
1310 self.findincoming(remote, base, heads, force=force)
1311
1311
1312 self.ui.debug(_("common changesets up to ")
1312 self.ui.debug(_("common changesets up to ")
1313 + " ".join(map(short, base.keys())) + "\n")
1313 + " ".join(map(short, base.keys())) + "\n")
1314
1314
1315 remain = dict.fromkeys(self.changelog.nodemap)
1315 remain = dict.fromkeys(self.changelog.nodemap)
1316
1316
1317 # prune everything remote has from the tree
1317 # prune everything remote has from the tree
1318 del remain[nullid]
1318 del remain[nullid]
1319 remove = base.keys()
1319 remove = base.keys()
1320 while remove:
1320 while remove:
1321 n = remove.pop(0)
1321 n = remove.pop(0)
1322 if n in remain:
1322 if n in remain:
1323 del remain[n]
1323 del remain[n]
1324 for p in self.changelog.parents(n):
1324 for p in self.changelog.parents(n):
1325 remove.append(p)
1325 remove.append(p)
1326
1326
1327 # find every node whose parents have been pruned
1327 # find every node whose parents have been pruned
1328 subset = []
1328 subset = []
1329 # find every remote head that will get new children
1329 # find every remote head that will get new children
1330 updated_heads = {}
1330 updated_heads = {}
1331 for n in remain:
1331 for n in remain:
1332 p1, p2 = self.changelog.parents(n)
1332 p1, p2 = self.changelog.parents(n)
1333 if p1 not in remain and p2 not in remain:
1333 if p1 not in remain and p2 not in remain:
1334 subset.append(n)
1334 subset.append(n)
1335 if heads:
1335 if heads:
1336 if p1 in heads:
1336 if p1 in heads:
1337 updated_heads[p1] = True
1337 updated_heads[p1] = True
1338 if p2 in heads:
1338 if p2 in heads:
1339 updated_heads[p2] = True
1339 updated_heads[p2] = True
1340
1340
1341 # this is the set of all roots we have to push
1341 # this is the set of all roots we have to push
1342 if heads:
1342 if heads:
1343 return subset, updated_heads.keys()
1343 return subset, updated_heads.keys()
1344 else:
1344 else:
1345 return subset
1345 return subset
1346
1346
1347 def pull(self, remote, heads=None, force=False):
1347 def pull(self, remote, heads=None, force=False):
1348 lock = self.lock()
1348 lock = self.lock()
1349 try:
1349 try:
1350 fetch = self.findincoming(remote, force=force)
1350 fetch = self.findincoming(remote, force=force)
1351 if fetch == [nullid]:
1351 if fetch == [nullid]:
1352 self.ui.status(_("requesting all changes\n"))
1352 self.ui.status(_("requesting all changes\n"))
1353
1353
1354 if not fetch:
1354 if not fetch:
1355 self.ui.status(_("no changes found\n"))
1355 self.ui.status(_("no changes found\n"))
1356 return 0
1356 return 0
1357
1357
1358 if heads is None:
1358 if heads is None:
1359 cg = remote.changegroup(fetch, 'pull')
1359 cg = remote.changegroup(fetch, 'pull')
1360 else:
1360 else:
1361 if 'changegroupsubset' not in remote.capabilities:
1361 if 'changegroupsubset' not in remote.capabilities:
1362 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1362 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1363 cg = remote.changegroupsubset(fetch, heads, 'pull')
1363 cg = remote.changegroupsubset(fetch, heads, 'pull')
1364 return self.addchangegroup(cg, 'pull', remote.url())
1364 return self.addchangegroup(cg, 'pull', remote.url())
1365 finally:
1365 finally:
1366 del lock
1366 del lock
1367
1367
1368 def push(self, remote, force=False, revs=None):
1368 def push(self, remote, force=False, revs=None):
1369 # there are two ways to push to remote repo:
1369 # there are two ways to push to remote repo:
1370 #
1370 #
1371 # addchangegroup assumes local user can lock remote
1371 # addchangegroup assumes local user can lock remote
1372 # repo (local filesystem, old ssh servers).
1372 # repo (local filesystem, old ssh servers).
1373 #
1373 #
1374 # unbundle assumes local user cannot lock remote repo (new ssh
1374 # unbundle assumes local user cannot lock remote repo (new ssh
1375 # servers, http servers).
1375 # servers, http servers).
1376
1376
1377 if remote.capable('unbundle'):
1377 if remote.capable('unbundle'):
1378 return self.push_unbundle(remote, force, revs)
1378 return self.push_unbundle(remote, force, revs)
1379 return self.push_addchangegroup(remote, force, revs)
1379 return self.push_addchangegroup(remote, force, revs)
1380
1380
    def prepush(self, remote, force, revs):
        """Analyze a pending push and build its changegroup.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when the push should not happen: either no
        changes were found, or (without force) the push would create
        new heads on the remote.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with nodes common to both sides;
        # 'inc' is non-empty when the remote has changes we lack
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty; nothing can be a "new" head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # only reached when force is set: warn about remote changes
            # we have not pulled yet
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1436
1436
1437 def push_addchangegroup(self, remote, force, revs):
1437 def push_addchangegroup(self, remote, force, revs):
1438 lock = remote.lock()
1438 lock = remote.lock()
1439 try:
1439 try:
1440 ret = self.prepush(remote, force, revs)
1440 ret = self.prepush(remote, force, revs)
1441 if ret[0] is not None:
1441 if ret[0] is not None:
1442 cg, remote_heads = ret
1442 cg, remote_heads = ret
1443 return remote.addchangegroup(cg, 'push', self.url())
1443 return remote.addchangegroup(cg, 'push', self.url())
1444 return ret[1]
1444 return ret[1]
1445 finally:
1445 finally:
1446 del lock
1446 del lock
1447
1447
1448 def push_unbundle(self, remote, force, revs):
1448 def push_unbundle(self, remote, force, revs):
1449 # local repo finds heads on server, finds out what revs it
1449 # local repo finds heads on server, finds out what revs it
1450 # must push. once revs transferred, if server finds it has
1450 # must push. once revs transferred, if server finds it has
1451 # different heads (someone else won commit/push race), server
1451 # different heads (someone else won commit/push race), server
1452 # aborts.
1452 # aborts.
1453
1453
1454 ret = self.prepush(remote, force, revs)
1454 ret = self.prepush(remote, force, revs)
1455 if ret[0] is not None:
1455 if ret[0] is not None:
1456 cg, remote_heads = ret
1456 cg, remote_heads = ret
1457 if force: remote_heads = ['force']
1457 if force: remote_heads = ['force']
1458 return remote.unbundle(cg, remote_heads, 'push')
1458 return remote.unbundle(cg, remote_heads, 'push')
1459 return ret[1]
1459 return ret[1]
1460
1460
1461 def changegroupinfo(self, nodes):
1461 def changegroupinfo(self, nodes):
1462 self.ui.note(_("%d changesets found\n") % len(nodes))
1462 self.ui.note(_("%d changesets found\n") % len(nodes))
1463 if self.ui.debugflag:
1463 if self.ui.debugflag:
1464 self.ui.debug(_("List of changesets:\n"))
1464 self.ui.debug(_("List of changesets:\n"))
1465 for node in nodes:
1465 for node in nodes:
1466 self.ui.debug("%s\n" % hex(node))
1466 self.ui.debug("%s\n" % hex(node))
1467
1467
1468 def changegroupsubset(self, bases, heads, source):
1468 def changegroupsubset(self, bases, heads, source):
1469 """This function generates a changegroup consisting of all the nodes
1469 """This function generates a changegroup consisting of all the nodes
1470 that are descendents of any of the bases, and ancestors of any of
1470 that are descendents of any of the bases, and ancestors of any of
1471 the heads.
1471 the heads.
1472
1472
1473 It is fairly complex as determining which filenodes and which
1473 It is fairly complex as determining which filenodes and which
1474 manifest nodes need to be included for the changeset to be complete
1474 manifest nodes need to be included for the changeset to be complete
1475 is non-trivial.
1475 is non-trivial.
1476
1476
1477 Another wrinkle is doing the reverse, figuring out which changeset in
1477 Another wrinkle is doing the reverse, figuring out which changeset in
1478 the changegroup a particular filenode or manifestnode belongs to."""
1478 the changegroup a particular filenode or manifestnode belongs to."""
1479
1479
1480 self.hook('preoutgoing', throw=True, source=source)
1480 self.hook('preoutgoing', throw=True, source=source)
1481
1481
1482 # Set up some initial variables
1482 # Set up some initial variables
1483 # Make it easy to refer to self.changelog
1483 # Make it easy to refer to self.changelog
1484 cl = self.changelog
1484 cl = self.changelog
1485 # msng is short for missing - compute the list of changesets in this
1485 # msng is short for missing - compute the list of changesets in this
1486 # changegroup.
1486 # changegroup.
1487 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1487 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1488 self.changegroupinfo(msng_cl_lst)
1488 self.changegroupinfo(msng_cl_lst)
1489 # Some bases may turn out to be superfluous, and some heads may be
1489 # Some bases may turn out to be superfluous, and some heads may be
1490 # too. nodesbetween will return the minimal set of bases and heads
1490 # too. nodesbetween will return the minimal set of bases and heads
1491 # necessary to re-create the changegroup.
1491 # necessary to re-create the changegroup.
1492
1492
1493 # Known heads are the list of heads that it is assumed the recipient
1493 # Known heads are the list of heads that it is assumed the recipient
1494 # of this changegroup will know about.
1494 # of this changegroup will know about.
1495 knownheads = {}
1495 knownheads = {}
1496 # We assume that all parents of bases are known heads.
1496 # We assume that all parents of bases are known heads.
1497 for n in bases:
1497 for n in bases:
1498 for p in cl.parents(n):
1498 for p in cl.parents(n):
1499 if p != nullid:
1499 if p != nullid:
1500 knownheads[p] = 1
1500 knownheads[p] = 1
1501 knownheads = knownheads.keys()
1501 knownheads = knownheads.keys()
1502 if knownheads:
1502 if knownheads:
1503 # Now that we know what heads are known, we can compute which
1503 # Now that we know what heads are known, we can compute which
1504 # changesets are known. The recipient must know about all
1504 # changesets are known. The recipient must know about all
1505 # changesets required to reach the known heads from the null
1505 # changesets required to reach the known heads from the null
1506 # changeset.
1506 # changeset.
1507 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1507 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1508 junk = None
1508 junk = None
1509 # Transform the list into an ersatz set.
1509 # Transform the list into an ersatz set.
1510 has_cl_set = dict.fromkeys(has_cl_set)
1510 has_cl_set = dict.fromkeys(has_cl_set)
1511 else:
1511 else:
1512 # If there were no known heads, the recipient cannot be assumed to
1512 # If there were no known heads, the recipient cannot be assumed to
1513 # know about any changesets.
1513 # know about any changesets.
1514 has_cl_set = {}
1514 has_cl_set = {}
1515
1515
1516 # Make it easy to refer to self.manifest
1516 # Make it easy to refer to self.manifest
1517 mnfst = self.manifest
1517 mnfst = self.manifest
1518 # We don't know which manifests are missing yet
1518 # We don't know which manifests are missing yet
1519 msng_mnfst_set = {}
1519 msng_mnfst_set = {}
1520 # Nor do we know which filenodes are missing.
1520 # Nor do we know which filenodes are missing.
1521 msng_filenode_set = {}
1521 msng_filenode_set = {}
1522
1522
1523 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1523 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1524 junk = None
1524 junk = None
1525
1525
1526 # A changeset always belongs to itself, so the changenode lookup
1526 # A changeset always belongs to itself, so the changenode lookup
1527 # function for a changenode is identity.
1527 # function for a changenode is identity.
1528 def identity(x):
1528 def identity(x):
1529 return x
1529 return x
1530
1530
1531 # A function generating function. Sets up an environment for the
1531 # A function generating function. Sets up an environment for the
1532 # inner function.
1532 # inner function.
1533 def cmp_by_rev_func(revlog):
1533 def cmp_by_rev_func(revlog):
1534 # Compare two nodes by their revision number in the environment's
1534 # Compare two nodes by their revision number in the environment's
1535 # revision history. Since the revision number both represents the
1535 # revision history. Since the revision number both represents the
1536 # most efficient order to read the nodes in, and represents a
1536 # most efficient order to read the nodes in, and represents a
1537 # topological sorting of the nodes, this function is often useful.
1537 # topological sorting of the nodes, this function is often useful.
1538 def cmp_by_rev(a, b):
1538 def cmp_by_rev(a, b):
1539 return cmp(revlog.rev(a), revlog.rev(b))
1539 return cmp(revlog.rev(a), revlog.rev(b))
1540 return cmp_by_rev
1540 return cmp_by_rev
1541
1541
1542 # If we determine that a particular file or manifest node must be a
1542 # If we determine that a particular file or manifest node must be a
1543 # node that the recipient of the changegroup will already have, we can
1543 # node that the recipient of the changegroup will already have, we can
1544 # also assume the recipient will have all the parents. This function
1544 # also assume the recipient will have all the parents. This function
1545 # prunes them from the set of missing nodes.
1545 # prunes them from the set of missing nodes.
1546 def prune_parents(revlog, hasset, msngset):
1546 def prune_parents(revlog, hasset, msngset):
1547 haslst = hasset.keys()
1547 haslst = hasset.keys()
1548 haslst.sort(cmp_by_rev_func(revlog))
1548 haslst.sort(cmp_by_rev_func(revlog))
1549 for node in haslst:
1549 for node in haslst:
1550 parentlst = [p for p in revlog.parents(node) if p != nullid]
1550 parentlst = [p for p in revlog.parents(node) if p != nullid]
1551 while parentlst:
1551 while parentlst:
1552 n = parentlst.pop()
1552 n = parentlst.pop()
1553 if n not in hasset:
1553 if n not in hasset:
1554 hasset[n] = 1
1554 hasset[n] = 1
1555 p = [p for p in revlog.parents(n) if p != nullid]
1555 p = [p for p in revlog.parents(n) if p != nullid]
1556 parentlst.extend(p)
1556 parentlst.extend(p)
1557 for n in hasset:
1557 for n in hasset:
1558 msngset.pop(n, None)
1558 msngset.pop(n, None)
1559
1559
1560 # This is a function generating function used to set up an environment
1560 # This is a function generating function used to set up an environment
1561 # for the inner function to execute in.
1561 # for the inner function to execute in.
1562 def manifest_and_file_collector(changedfileset):
1562 def manifest_and_file_collector(changedfileset):
1563 # This is an information gathering function that gathers
1563 # This is an information gathering function that gathers
1564 # information from each changeset node that goes out as part of
1564 # information from each changeset node that goes out as part of
1565 # the changegroup. The information gathered is a list of which
1565 # the changegroup. The information gathered is a list of which
1566 # manifest nodes are potentially required (the recipient may
1566 # manifest nodes are potentially required (the recipient may
1567 # already have them) and total list of all files which were
1567 # already have them) and total list of all files which were
1568 # changed in any changeset in the changegroup.
1568 # changed in any changeset in the changegroup.
1569 #
1569 #
1570 # We also remember the first changenode we saw any manifest
1570 # We also remember the first changenode we saw any manifest
1571 # referenced by so we can later determine which changenode 'owns'
1571 # referenced by so we can later determine which changenode 'owns'
1572 # the manifest.
1572 # the manifest.
1573 def collect_manifests_and_files(clnode):
1573 def collect_manifests_and_files(clnode):
1574 c = cl.read(clnode)
1574 c = cl.read(clnode)
1575 for f in c[3]:
1575 for f in c[3]:
1576 # This is to make sure we only have one instance of each
1576 # This is to make sure we only have one instance of each
1577 # filename string for each filename.
1577 # filename string for each filename.
1578 changedfileset.setdefault(f, f)
1578 changedfileset.setdefault(f, f)
1579 msng_mnfst_set.setdefault(c[0], clnode)
1579 msng_mnfst_set.setdefault(c[0], clnode)
1580 return collect_manifests_and_files
1580 return collect_manifests_and_files
1581
1581
# Figure out which manifest nodes (of the ones we think might be part
# of the changegroup) the recipient must know about and remove them
# from the changegroup.
def prune_manifests():
    """Drop from msng_mnfst_set every manifest node (plus its known
    ancestors) that the recipient is already guaranteed to have."""
    known = {}
    for candidate in msng_mnfst_set:
        # A manifest linked to a changeset on the recipient's side
        # must itself already be present on that side.
        owner = cl.node(mnfst.linkrev(candidate))
        if owner in has_cl_set:
            known[candidate] = 1
    prune_parents(mnfst, known, msng_mnfst_set)
1595
1595
# Use the information collected in collect_manifests_and_files to say
# which changenode any manifestnode belongs to.
def lookup_manifest_link(mnfstnode):
    # msng_mnfst_set maps manifest node -> owning changelog node.
    return msng_mnfst_set[mnfstnode]
1600
1600
# A function generating function that sets up the initial environment
# for the inner function.
def filenode_collector(changedfiles):
    # Single-element list used as a mutable cell: the manifest
    # revision we hope to see next.  When the group walks manifests in
    # revision order we can use the cheap delta path below.
    next_rev = [0]
    # This gathers information from each manifestnode included in the
    # changegroup about which filenodes the manifest node references
    # so we can include those in the changegroup too.
    #
    # It also remembers which changenode each filenode belongs to.  It
    # does this by assuming that a filenode belongs to the changenode
    # the first manifest that references it belongs to.
    def collect_msng_filenodes(mnfstnode):
        r = mnfst.rev(mnfstnode)
        if r == next_rev[0]:
            # If the last rev we looked at was the one just previous,
            # we only need to see a diff.
            delta = mdiff.patchtext(mnfst.delta(mnfstnode))
            # For each line in the delta
            for dline in delta.splitlines():
                # get the filename and filenode for that line
                # (manifest lines are "<filename>\0<hex filenode>")
                f, fnode = dline.split('\0')
                fnode = bin(fnode[:40])
                f = changedfiles.get(f, None)
                # And if the file is in the list of files we care
                # about.
                if f is not None:
                    # Get the changenode this manifest belongs to
                    clnode = msng_mnfst_set[mnfstnode]
                    # Create the set of filenodes for the file if
                    # there isn't one already.
                    ndset = msng_filenode_set.setdefault(f, {})
                    # And set the filenode's changelog node to the
                    # manifest's if it hasn't been set already.
                    ndset.setdefault(fnode, clnode)
        else:
            # Otherwise we need a full manifest.
            m = mnfst.read(mnfstnode)
            # For every file we care about.
            for f in changedfiles:
                fnode = m.get(f, None)
                # If it's in the manifest
                if fnode is not None:
                    # See comments above.
                    clnode = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(f, {})
                    ndset.setdefault(fnode, clnode)
        # Remember the revision we hope to see next.
        next_rev[0] = r + 1
    return collect_msng_filenodes
1650
1650
# We have a list of filenodes we think we need for a file; remove all
# those the recipient is already known to have.
def prune_filenodes(f, filerevlog):
    """Prune from msng_filenode_set[f] each filenode (and its known
    ancestors) whose owning changeset the recipient already has."""
    wanted = msng_filenode_set[f]
    present = {}
    for fnode in wanted:
        # If a 'missing' filenode is linked to a changeset the
        # recipient is assumed to have, the recipient must also have
        # that filenode.
        owner = cl.node(filerevlog.linkrev(fnode))
        if owner in has_cl_set:
            present[fnode] = 1
    prune_parents(filerevlog, present, wanted)
1664
1664
# A function-generator that captures the per-file context needed by
# the inner lookup function.
def lookup_filenode_link_func(fname):
    """Return a function mapping a filenode of *fname* to the
    changelog node it was first referenced from."""
    linkmap = msng_filenode_set[fname]
    def lookup_filenode_link(fnode):
        # Plain closure lookup: filenode -> owning changelog node.
        return linkmap[fnode]
    return lookup_filenode_link
1673
1673
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
    # Generator producing the raw changegroup chunks: the changelog
    # group, then the manifest group, then one group per changed file
    # (each preceded by a chunk carrying the filename), terminated by
    # a closing chunk.
    #
    # The set of changed files starts empty.
    changedfiles = {}
    # Create a changenode group generator that will call our functions
    # back to lookup the owning changenode and collect information.
    group = cl.group(msng_cl_lst, identity,
                     manifest_and_file_collector(changedfiles))
    for chnk in group:
        yield chnk

    # The list of manifests has been collected by the generator
    # calling our functions back.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Sort the manifestnodes by revision number.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # Create a generator for the manifestnodes that calls our lookup
    # and data collection functions back.
    group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                        filenode_collector(changedfiles))
    for chnk in group:
        yield chnk

    # These are no longer needed, dereference and toss the memory for
    # them.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # Go through all our files in order sorted by name.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.
        if msng_filenode_set.has_key(fname):
            prune_filenodes(fname, filerevlog)
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # If any filenodes are left, generate the group for them,
        # otherwise don't bother.
        if len(msng_filenode_lst) > 0:
            yield changegroup.genchunk(fname)
            # Sort the filenodes by their revision #
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Create a group generator and only pass in a changenode
            # lookup function as we need to collect no information
            # from filenodes.
            group = filerevlog.group(msng_filenode_lst,
                                     lookup_filenode_link_func(fname))
            for chnk in group:
                yield chnk
        if msng_filenode_set.has_key(fname):
            # Don't need this anymore, toss it to free memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield changegroup.closechunk()
1734
1734
1735 if msng_cl_lst:
1735 if msng_cl_lst:
1736 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1736 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1737
1737
1738 return util.chunkbuffer(gengroup())
1738 return util.chunkbuffer(gengroup())
1739
1739
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    Returns a util.chunkbuffer wrapping the chunk generator.  Fires
    the 'preoutgoing' hook up front and the 'outgoing' hook after the
    node list is computed (the latter only if any nodes are outgoing).
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # Everything between basenodes and the heads is outgoing.
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Set of outgoing changelog revision numbers, consulted below to
    # pick the manifest/file revisions that belong in the group.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        # A changelog node is its own link node.
        return x

    def gennodelst(revlog):
        # Yield the nodes of revlog whose linked changeset is in the
        # outgoing revision set, in revision order.
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    def changed_file_collector(changedfileset):
        # Build a callback that records, for each outgoing changeset,
        # every filename it touches (c[3]) into changedfileset.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Build a callback mapping a node of revlog to the changelog
        # node it is linked to.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        # Changelog group first; changed files are collected as a side
        # effect of the traversal via the callback.
        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest group.
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        # Then one group per changed file, each introduced by a chunk
        # carrying the filename; files with no outgoing revisions are
        # skipped entirely.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1806
1806
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    source is a stream of changegroup chunks; srctype and url
    describe where it came from and are passed through to the hooks.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # Called per incoming changeset: the slot a new node will take
        # is the current changelog length.
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        # Map a changelog node to its revision number (used as the
        # link revision for manifest/file entries).
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        # Weak proxy so the transaction object itself can be released
        # (and thus aborted by its destructor) in the finally below.
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        # cor/cnr: last changelog revision before/after the pull.
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, trp, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # Each file section starts with a chunk naming the file;
            # an empty chunk terminates the stream.
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(trp)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the whole transaction.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()
    finally:
        # Drop the last strong reference; if close() was never reached
        # this aborts the transaction.
        del tr

    if changesets > 0:
        # Post-transaction notification hooks: one 'changegroup' for
        # the whole batch, then one 'incoming' per added changeset.
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1906
1906
1907
1907
def stream_in(self, remote):
    """Clone by streaming the remote's store files directly to disk.

    Protocol: the remote sends a status line (an integer code), then
    a "total_files total_bytes" line, then for each file a
    "name\\0size" header followed by exactly size bytes of data.

    Returns len(self.heads()) + 1 (always non-zero, mirroring the
    addchangegroup return convention).  Raises util.Abort on a
    server-reported error and util.UnexpectedOutput on a malformed
    response.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: this read "except ValueError, TypeError:", which in
    # Python 2 binds the ValueError instance to the name TypeError and
    # never catches TypeError at all.  A tuple is required to catch
    # both exception types.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # BUG FIX: same tuple-less except clause as above.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # guard against a zero/negative interval (clock granularity)
        # before dividing below
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
1954
1954
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # BUG FIX: the default was the mutable literal [], a shared object
    # across all calls.  Use None as sentinel; behavior is unchanged.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1973
1973
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the (src, dest) renames in *files*.

    The pairs are copied into fresh tuples up front so the returned
    closure keeps no reference back to the caller's structures."""
    pending = [tuple(pair) for pair in files]
    def a():
        for source, target in pending:
            util.rename(source, target)
    return a
1981
1981
def instance(ui, path, create):
    """Open (or create) the local repository at *path*, stripping any
    leading 'file:' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1984
1984
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now