##// END OF EJS Templates
localrepo: ensure files unicity in commit() (issue 714)
Patrick Mezard -
r5882:f791a2ac default
parent child Browse files
Show More
@@ -1,1992 +1,1994 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71 self.sopener = util.encodedopener(util.opener(self.spath),
71 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.encodefn)
72 self.encodefn)
73
73
74 self.ui = ui.ui(parentui=parentui)
74 self.ui = ui.ui(parentui=parentui)
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._transref = self._lockref = self._wlockref = None
85 self._transref = self._lockref = self._wlockref = None
86
86
87 def __getattr__(self, name):
87 def __getattr__(self, name):
88 if name == 'changelog':
88 if name == 'changelog':
89 self.changelog = changelog.changelog(self.sopener)
89 self.changelog = changelog.changelog(self.sopener)
90 self.sopener.defversion = self.changelog.version
90 self.sopener.defversion = self.changelog.version
91 return self.changelog
91 return self.changelog
92 if name == 'manifest':
92 if name == 'manifest':
93 self.changelog
93 self.changelog
94 self.manifest = manifest.manifest(self.sopener)
94 self.manifest = manifest.manifest(self.sopener)
95 return self.manifest
95 return self.manifest
96 if name == 'dirstate':
96 if name == 'dirstate':
97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 return self.dirstate
98 return self.dirstate
99 else:
99 else:
100 raise AttributeError, name
100 raise AttributeError, name
101
101
102 def url(self):
102 def url(self):
103 return 'file:' + self.root
103 return 'file:' + self.root
104
104
105 def hook(self, name, throw=False, **args):
105 def hook(self, name, throw=False, **args):
106 return hook.hook(self.ui, self, name, throw, **args)
106 return hook.hook(self.ui, self, name, throw, **args)
107
107
108 tag_disallowed = ':\r\n'
108 tag_disallowed = ':\r\n'
109
109
110 def _tag(self, name, node, message, local, user, date, parent=None,
110 def _tag(self, name, node, message, local, user, date, parent=None,
111 extra={}):
111 extra={}):
112 use_dirstate = parent is None
112 use_dirstate = parent is None
113
113
114 for c in self.tag_disallowed:
114 for c in self.tag_disallowed:
115 if c in name:
115 if c in name:
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
117
117
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119
119
120 def writetag(fp, name, munge, prevtags):
120 def writetag(fp, name, munge, prevtags):
121 if prevtags and prevtags[-1] != '\n':
121 if prevtags and prevtags[-1] != '\n':
122 fp.write('\n')
122 fp.write('\n')
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.close()
124 fp.close()
125
125
126 prevtags = ''
126 prevtags = ''
127 if local:
127 if local:
128 try:
128 try:
129 fp = self.opener('localtags', 'r+')
129 fp = self.opener('localtags', 'r+')
130 except IOError, err:
130 except IOError, err:
131 fp = self.opener('localtags', 'a')
131 fp = self.opener('localtags', 'a')
132 else:
132 else:
133 prevtags = fp.read()
133 prevtags = fp.read()
134
134
135 # local tags are stored in the current charset
135 # local tags are stored in the current charset
136 writetag(fp, name, None, prevtags)
136 writetag(fp, name, None, prevtags)
137 self.hook('tag', node=hex(node), tag=name, local=local)
137 self.hook('tag', node=hex(node), tag=name, local=local)
138 return
138 return
139
139
140 if use_dirstate:
140 if use_dirstate:
141 try:
141 try:
142 fp = self.wfile('.hgtags', 'rb+')
142 fp = self.wfile('.hgtags', 'rb+')
143 except IOError, err:
143 except IOError, err:
144 fp = self.wfile('.hgtags', 'ab')
144 fp = self.wfile('.hgtags', 'ab')
145 else:
145 else:
146 prevtags = fp.read()
146 prevtags = fp.read()
147 else:
147 else:
148 try:
148 try:
149 prevtags = self.filectx('.hgtags', parent).data()
149 prevtags = self.filectx('.hgtags', parent).data()
150 except revlog.LookupError:
150 except revlog.LookupError:
151 pass
151 pass
152 fp = self.wfile('.hgtags', 'wb')
152 fp = self.wfile('.hgtags', 'wb')
153 if prevtags:
153 if prevtags:
154 fp.write(prevtags)
154 fp.write(prevtags)
155
155
156 # committed tags are stored in UTF-8
156 # committed tags are stored in UTF-8
157 writetag(fp, name, util.fromlocal, prevtags)
157 writetag(fp, name, util.fromlocal, prevtags)
158
158
159 if use_dirstate and '.hgtags' not in self.dirstate:
159 if use_dirstate and '.hgtags' not in self.dirstate:
160 self.add(['.hgtags'])
160 self.add(['.hgtags'])
161
161
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 extra=extra)
163 extra=extra)
164
164
165 self.hook('tag', node=hex(node), tag=name, local=local)
165 self.hook('tag', node=hex(node), tag=name, local=local)
166
166
167 return tagnode
167 return tagnode
168
168
169 def tag(self, name, node, message, local, user, date):
169 def tag(self, name, node, message, local, user, date):
170 '''tag a revision with a symbolic name.
170 '''tag a revision with a symbolic name.
171
171
172 if local is True, the tag is stored in a per-repository file.
172 if local is True, the tag is stored in a per-repository file.
173 otherwise, it is stored in the .hgtags file, and a new
173 otherwise, it is stored in the .hgtags file, and a new
174 changeset is committed with the change.
174 changeset is committed with the change.
175
175
176 keyword arguments:
176 keyword arguments:
177
177
178 local: whether to store tag in non-version-controlled file
178 local: whether to store tag in non-version-controlled file
179 (default False)
179 (default False)
180
180
181 message: commit message to use if committing
181 message: commit message to use if committing
182
182
183 user: name of user to use if committing
183 user: name of user to use if committing
184
184
185 date: date tuple to use if committing'''
185 date: date tuple to use if committing'''
186
186
187 for x in self.status()[:5]:
187 for x in self.status()[:5]:
188 if '.hgtags' in x:
188 if '.hgtags' in x:
189 raise util.Abort(_('working copy of .hgtags is changed '
189 raise util.Abort(_('working copy of .hgtags is changed '
190 '(please commit .hgtags manually)'))
190 '(please commit .hgtags manually)'))
191
191
192
192
193 self._tag(name, node, message, local, user, date)
193 self._tag(name, node, message, local, user, date)
194
194
195 def tags(self):
195 def tags(self):
196 '''return a mapping of tag to node'''
196 '''return a mapping of tag to node'''
197 if self.tagscache:
197 if self.tagscache:
198 return self.tagscache
198 return self.tagscache
199
199
200 globaltags = {}
200 globaltags = {}
201
201
202 def readtags(lines, fn):
202 def readtags(lines, fn):
203 filetags = {}
203 filetags = {}
204 count = 0
204 count = 0
205
205
206 def warn(msg):
206 def warn(msg):
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208
208
209 for l in lines:
209 for l in lines:
210 count += 1
210 count += 1
211 if not l:
211 if not l:
212 continue
212 continue
213 s = l.split(" ", 1)
213 s = l.split(" ", 1)
214 if len(s) != 2:
214 if len(s) != 2:
215 warn(_("cannot parse entry"))
215 warn(_("cannot parse entry"))
216 continue
216 continue
217 node, key = s
217 node, key = s
218 key = util.tolocal(key.strip()) # stored in UTF-8
218 key = util.tolocal(key.strip()) # stored in UTF-8
219 try:
219 try:
220 bin_n = bin(node)
220 bin_n = bin(node)
221 except TypeError:
221 except TypeError:
222 warn(_("node '%s' is not well formed") % node)
222 warn(_("node '%s' is not well formed") % node)
223 continue
223 continue
224 if bin_n not in self.changelog.nodemap:
224 if bin_n not in self.changelog.nodemap:
225 warn(_("tag '%s' refers to unknown node") % key)
225 warn(_("tag '%s' refers to unknown node") % key)
226 continue
226 continue
227
227
228 h = []
228 h = []
229 if key in filetags:
229 if key in filetags:
230 n, h = filetags[key]
230 n, h = filetags[key]
231 h.append(n)
231 h.append(n)
232 filetags[key] = (bin_n, h)
232 filetags[key] = (bin_n, h)
233
233
234 for k, nh in filetags.items():
234 for k, nh in filetags.items():
235 if k not in globaltags:
235 if k not in globaltags:
236 globaltags[k] = nh
236 globaltags[k] = nh
237 continue
237 continue
238 # we prefer the global tag if:
238 # we prefer the global tag if:
239 # it supercedes us OR
239 # it supercedes us OR
240 # mutual supercedes and it has a higher rank
240 # mutual supercedes and it has a higher rank
241 # otherwise we win because we're tip-most
241 # otherwise we win because we're tip-most
242 an, ah = nh
242 an, ah = nh
243 bn, bh = globaltags[k]
243 bn, bh = globaltags[k]
244 if (bn != an and an in bh and
244 if (bn != an and an in bh and
245 (bn not in ah or len(bh) > len(ah))):
245 (bn not in ah or len(bh) > len(ah))):
246 an = bn
246 an = bn
247 ah.extend([n for n in bh if n not in ah])
247 ah.extend([n for n in bh if n not in ah])
248 globaltags[k] = an, ah
248 globaltags[k] = an, ah
249
249
250 # read the tags file from each head, ending with the tip
250 # read the tags file from each head, ending with the tip
251 f = None
251 f = None
252 for rev, node, fnode in self._hgtagsnodes():
252 for rev, node, fnode in self._hgtagsnodes():
253 f = (f and f.filectx(fnode) or
253 f = (f and f.filectx(fnode) or
254 self.filectx('.hgtags', fileid=fnode))
254 self.filectx('.hgtags', fileid=fnode))
255 readtags(f.data().splitlines(), f)
255 readtags(f.data().splitlines(), f)
256
256
257 try:
257 try:
258 data = util.fromlocal(self.opener("localtags").read())
258 data = util.fromlocal(self.opener("localtags").read())
259 # localtags are stored in the local character set
259 # localtags are stored in the local character set
260 # while the internal tag table is stored in UTF-8
260 # while the internal tag table is stored in UTF-8
261 readtags(data.splitlines(), "localtags")
261 readtags(data.splitlines(), "localtags")
262 except IOError:
262 except IOError:
263 pass
263 pass
264
264
265 self.tagscache = {}
265 self.tagscache = {}
266 for k,nh in globaltags.items():
266 for k,nh in globaltags.items():
267 n = nh[0]
267 n = nh[0]
268 if n != nullid:
268 if n != nullid:
269 self.tagscache[k] = n
269 self.tagscache[k] = n
270 self.tagscache['tip'] = self.changelog.tip()
270 self.tagscache['tip'] = self.changelog.tip()
271
271
272 return self.tagscache
272 return self.tagscache
273
273
274 def _hgtagsnodes(self):
274 def _hgtagsnodes(self):
275 heads = self.heads()
275 heads = self.heads()
276 heads.reverse()
276 heads.reverse()
277 last = {}
277 last = {}
278 ret = []
278 ret = []
279 for node in heads:
279 for node in heads:
280 c = self.changectx(node)
280 c = self.changectx(node)
281 rev = c.rev()
281 rev = c.rev()
282 try:
282 try:
283 fnode = c.filenode('.hgtags')
283 fnode = c.filenode('.hgtags')
284 except revlog.LookupError:
284 except revlog.LookupError:
285 continue
285 continue
286 ret.append((rev, node, fnode))
286 ret.append((rev, node, fnode))
287 if fnode in last:
287 if fnode in last:
288 ret[last[fnode]] = None
288 ret[last[fnode]] = None
289 last[fnode] = len(ret) - 1
289 last[fnode] = len(ret) - 1
290 return [item for item in ret if item]
290 return [item for item in ret if item]
291
291
292 def tagslist(self):
292 def tagslist(self):
293 '''return a list of tags ordered by revision'''
293 '''return a list of tags ordered by revision'''
294 l = []
294 l = []
295 for t, n in self.tags().items():
295 for t, n in self.tags().items():
296 try:
296 try:
297 r = self.changelog.rev(n)
297 r = self.changelog.rev(n)
298 except:
298 except:
299 r = -2 # sort to the beginning of the list if unknown
299 r = -2 # sort to the beginning of the list if unknown
300 l.append((r, t, n))
300 l.append((r, t, n))
301 l.sort()
301 l.sort()
302 return [(t, n) for r, t, n in l]
302 return [(t, n) for r, t, n in l]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().items():
308 for t, n in self.tags().items():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def _branchtags(self):
312 def _branchtags(self):
313 partial, last, lrev = self._readbranchcache()
313 partial, last, lrev = self._readbranchcache()
314
314
315 tiprev = self.changelog.count() - 1
315 tiprev = self.changelog.count() - 1
316 if lrev != tiprev:
316 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
319
320 return partial
320 return partial
321
321
322 def branchtags(self):
322 def branchtags(self):
323 if self.branchcache is not None:
323 if self.branchcache is not None:
324 return self.branchcache
324 return self.branchcache
325
325
326 self.branchcache = {} # avoid recursion in changectx
326 self.branchcache = {} # avoid recursion in changectx
327 partial = self._branchtags()
327 partial = self._branchtags()
328
328
329 # the branch cache is stored on disk as UTF-8, but in the local
329 # the branch cache is stored on disk as UTF-8, but in the local
330 # charset internally
330 # charset internally
331 for k, v in partial.items():
331 for k, v in partial.items():
332 self.branchcache[util.tolocal(k)] = v
332 self.branchcache[util.tolocal(k)] = v
333 return self.branchcache
333 return self.branchcache
334
334
335 def _readbranchcache(self):
335 def _readbranchcache(self):
336 partial = {}
336 partial = {}
337 try:
337 try:
338 f = self.opener("branch.cache")
338 f = self.opener("branch.cache")
339 lines = f.read().split('\n')
339 lines = f.read().split('\n')
340 f.close()
340 f.close()
341 except (IOError, OSError):
341 except (IOError, OSError):
342 return {}, nullid, nullrev
342 return {}, nullid, nullrev
343
343
344 try:
344 try:
345 last, lrev = lines.pop(0).split(" ", 1)
345 last, lrev = lines.pop(0).split(" ", 1)
346 last, lrev = bin(last), int(lrev)
346 last, lrev = bin(last), int(lrev)
347 if not (lrev < self.changelog.count() and
347 if not (lrev < self.changelog.count() and
348 self.changelog.node(lrev) == last): # sanity check
348 self.changelog.node(lrev) == last): # sanity check
349 # invalidate the cache
349 # invalidate the cache
350 raise ValueError('Invalid branch cache: unknown tip')
350 raise ValueError('Invalid branch cache: unknown tip')
351 for l in lines:
351 for l in lines:
352 if not l: continue
352 if not l: continue
353 node, label = l.split(" ", 1)
353 node, label = l.split(" ", 1)
354 partial[label.strip()] = bin(node)
354 partial[label.strip()] = bin(node)
355 except (KeyboardInterrupt, util.SignalInterrupt):
355 except (KeyboardInterrupt, util.SignalInterrupt):
356 raise
356 raise
357 except Exception, inst:
357 except Exception, inst:
358 if self.ui.debugflag:
358 if self.ui.debugflag:
359 self.ui.warn(str(inst), '\n')
359 self.ui.warn(str(inst), '\n')
360 partial, last, lrev = {}, nullid, nullrev
360 partial, last, lrev = {}, nullid, nullrev
361 return partial, last, lrev
361 return partial, last, lrev
362
362
363 def _writebranchcache(self, branches, tip, tiprev):
363 def _writebranchcache(self, branches, tip, tiprev):
364 try:
364 try:
365 f = self.opener("branch.cache", "w", atomictemp=True)
365 f = self.opener("branch.cache", "w", atomictemp=True)
366 f.write("%s %s\n" % (hex(tip), tiprev))
366 f.write("%s %s\n" % (hex(tip), tiprev))
367 for label, node in branches.iteritems():
367 for label, node in branches.iteritems():
368 f.write("%s %s\n" % (hex(node), label))
368 f.write("%s %s\n" % (hex(node), label))
369 f.rename()
369 f.rename()
370 except (IOError, OSError):
370 except (IOError, OSError):
371 pass
371 pass
372
372
373 def _updatebranchcache(self, partial, start, end):
373 def _updatebranchcache(self, partial, start, end):
374 for r in xrange(start, end):
374 for r in xrange(start, end):
375 c = self.changectx(r)
375 c = self.changectx(r)
376 b = c.branch()
376 b = c.branch()
377 partial[b] = c.node()
377 partial[b] = c.node()
378
378
379 def lookup(self, key):
379 def lookup(self, key):
380 if key == '.':
380 if key == '.':
381 key, second = self.dirstate.parents()
381 key, second = self.dirstate.parents()
382 if key == nullid:
382 if key == nullid:
383 raise repo.RepoError(_("no revision checked out"))
383 raise repo.RepoError(_("no revision checked out"))
384 if second != nullid:
384 if second != nullid:
385 self.ui.warn(_("warning: working directory has two parents, "
385 self.ui.warn(_("warning: working directory has two parents, "
386 "tag '.' uses the first\n"))
386 "tag '.' uses the first\n"))
387 elif key == 'null':
387 elif key == 'null':
388 return nullid
388 return nullid
389 n = self.changelog._match(key)
389 n = self.changelog._match(key)
390 if n:
390 if n:
391 return n
391 return n
392 if key in self.tags():
392 if key in self.tags():
393 return self.tags()[key]
393 return self.tags()[key]
394 if key in self.branchtags():
394 if key in self.branchtags():
395 return self.branchtags()[key]
395 return self.branchtags()[key]
396 n = self.changelog._partialmatch(key)
396 n = self.changelog._partialmatch(key)
397 if n:
397 if n:
398 return n
398 return n
399 try:
399 try:
400 if len(key) == 20:
400 if len(key) == 20:
401 key = hex(key)
401 key = hex(key)
402 except:
402 except:
403 pass
403 pass
404 raise repo.RepoError(_("unknown revision '%s'") % key)
404 raise repo.RepoError(_("unknown revision '%s'") % key)
405
405
406 def dev(self):
406 def dev(self):
407 return os.lstat(self.path).st_dev
407 return os.lstat(self.path).st_dev
408
408
409 def local(self):
409 def local(self):
410 return True
410 return True
411
411
412 def join(self, f):
412 def join(self, f):
413 return os.path.join(self.path, f)
413 return os.path.join(self.path, f)
414
414
415 def sjoin(self, f):
415 def sjoin(self, f):
416 f = self.encodefn(f)
416 f = self.encodefn(f)
417 return os.path.join(self.spath, f)
417 return os.path.join(self.spath, f)
418
418
419 def wjoin(self, f):
419 def wjoin(self, f):
420 return os.path.join(self.root, f)
420 return os.path.join(self.root, f)
421
421
422 def file(self, f):
422 def file(self, f):
423 if f[0] == '/':
423 if f[0] == '/':
424 f = f[1:]
424 f = f[1:]
425 return filelog.filelog(self.sopener, f)
425 return filelog.filelog(self.sopener, f)
426
426
427 def changectx(self, changeid=None):
427 def changectx(self, changeid=None):
428 return context.changectx(self, changeid)
428 return context.changectx(self, changeid)
429
429
430 def workingctx(self):
430 def workingctx(self):
431 return context.workingctx(self)
431 return context.workingctx(self)
432
432
433 def parents(self, changeid=None):
433 def parents(self, changeid=None):
434 '''
434 '''
435 get list of changectxs for parents of changeid or working directory
435 get list of changectxs for parents of changeid or working directory
436 '''
436 '''
437 if changeid is None:
437 if changeid is None:
438 pl = self.dirstate.parents()
438 pl = self.dirstate.parents()
439 else:
439 else:
440 n = self.changelog.lookup(changeid)
440 n = self.changelog.lookup(changeid)
441 pl = self.changelog.parents(n)
441 pl = self.changelog.parents(n)
442 if pl[1] == nullid:
442 if pl[1] == nullid:
443 return [self.changectx(pl[0])]
443 return [self.changectx(pl[0])]
444 return [self.changectx(pl[0]), self.changectx(pl[1])]
444 return [self.changectx(pl[0]), self.changectx(pl[1])]
445
445
446 def filectx(self, path, changeid=None, fileid=None):
446 def filectx(self, path, changeid=None, fileid=None):
447 """changeid can be a changeset revision, node, or tag.
447 """changeid can be a changeset revision, node, or tag.
448 fileid can be a file revision or node."""
448 fileid can be a file revision or node."""
449 return context.filectx(self, path, changeid, fileid)
449 return context.filectx(self, path, changeid, fileid)
450
450
451 def getcwd(self):
451 def getcwd(self):
452 return self.dirstate.getcwd()
452 return self.dirstate.getcwd()
453
453
454 def pathto(self, f, cwd=None):
454 def pathto(self, f, cwd=None):
455 return self.dirstate.pathto(f, cwd)
455 return self.dirstate.pathto(f, cwd)
456
456
457 def wfile(self, f, mode='r'):
457 def wfile(self, f, mode='r'):
458 return self.wopener(f, mode)
458 return self.wopener(f, mode)
459
459
460 def _link(self, f):
460 def _link(self, f):
461 return os.path.islink(self.wjoin(f))
461 return os.path.islink(self.wjoin(f))
462
462
463 def _filter(self, filter, filename, data):
463 def _filter(self, filter, filename, data):
464 if filter not in self.filterpats:
464 if filter not in self.filterpats:
465 l = []
465 l = []
466 for pat, cmd in self.ui.configitems(filter):
466 for pat, cmd in self.ui.configitems(filter):
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 l.append((mf, cmd))
468 l.append((mf, cmd))
469 self.filterpats[filter] = l
469 self.filterpats[filter] = l
470
470
471 for mf, cmd in self.filterpats[filter]:
471 for mf, cmd in self.filterpats[filter]:
472 if mf(filename):
472 if mf(filename):
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
474 data = util.filter(data, cmd)
474 data = util.filter(data, cmd)
475 break
475 break
476
476
477 return data
477 return data
478
478
479 def wread(self, filename):
479 def wread(self, filename):
480 if self._link(filename):
480 if self._link(filename):
481 data = os.readlink(self.wjoin(filename))
481 data = os.readlink(self.wjoin(filename))
482 else:
482 else:
483 data = self.wopener(filename, 'r').read()
483 data = self.wopener(filename, 'r').read()
484 return self._filter("encode", filename, data)
484 return self._filter("encode", filename, data)
485
485
486 def wwrite(self, filename, data, flags):
486 def wwrite(self, filename, data, flags):
487 data = self._filter("decode", filename, data)
487 data = self._filter("decode", filename, data)
488 if "l" in flags:
488 if "l" in flags:
489 self.wopener.symlink(data, filename)
489 self.wopener.symlink(data, filename)
490 else:
490 else:
491 try:
491 try:
492 if self._link(filename):
492 if self._link(filename):
493 os.unlink(self.wjoin(filename))
493 os.unlink(self.wjoin(filename))
494 except OSError:
494 except OSError:
495 pass
495 pass
496 self.wopener(filename, 'w').write(data)
496 self.wopener(filename, 'w').write(data)
497 util.set_exec(self.wjoin(filename), "x" in flags)
497 util.set_exec(self.wjoin(filename), "x" in flags)
498
498
499 def wwritedata(self, filename, data):
499 def wwritedata(self, filename, data):
500 return self._filter("decode", filename, data)
500 return self._filter("decode", filename, data)
501
501
502 def transaction(self):
502 def transaction(self):
503 if self._transref and self._transref():
503 if self._transref and self._transref():
504 return self._transref().nest()
504 return self._transref().nest()
505
505
506 # save dirstate for rollback
506 # save dirstate for rollback
507 try:
507 try:
508 ds = self.opener("dirstate").read()
508 ds = self.opener("dirstate").read()
509 except IOError:
509 except IOError:
510 ds = ""
510 ds = ""
511 self.opener("journal.dirstate", "w").write(ds)
511 self.opener("journal.dirstate", "w").write(ds)
512
512
513 renames = [(self.sjoin("journal"), self.sjoin("undo")),
513 renames = [(self.sjoin("journal"), self.sjoin("undo")),
514 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
514 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
515 tr = transaction.transaction(self.ui.warn, self.sopener,
515 tr = transaction.transaction(self.ui.warn, self.sopener,
516 self.sjoin("journal"),
516 self.sjoin("journal"),
517 aftertrans(renames))
517 aftertrans(renames))
518 self._transref = weakref.ref(tr)
518 self._transref = weakref.ref(tr)
519 return tr
519 return tr
520
520
521 def recover(self):
521 def recover(self):
522 l = self.lock()
522 l = self.lock()
523 try:
523 try:
524 if os.path.exists(self.sjoin("journal")):
524 if os.path.exists(self.sjoin("journal")):
525 self.ui.status(_("rolling back interrupted transaction\n"))
525 self.ui.status(_("rolling back interrupted transaction\n"))
526 transaction.rollback(self.sopener, self.sjoin("journal"))
526 transaction.rollback(self.sopener, self.sjoin("journal"))
527 self.invalidate()
527 self.invalidate()
528 return True
528 return True
529 else:
529 else:
530 self.ui.warn(_("no interrupted transaction available\n"))
530 self.ui.warn(_("no interrupted transaction available\n"))
531 return False
531 return False
532 finally:
532 finally:
533 del l
533 del l
534
534
535 def rollback(self):
535 def rollback(self):
536 wlock = lock = None
536 wlock = lock = None
537 try:
537 try:
538 wlock = self.wlock()
538 wlock = self.wlock()
539 lock = self.lock()
539 lock = self.lock()
540 if os.path.exists(self.sjoin("undo")):
540 if os.path.exists(self.sjoin("undo")):
541 self.ui.status(_("rolling back last transaction\n"))
541 self.ui.status(_("rolling back last transaction\n"))
542 transaction.rollback(self.sopener, self.sjoin("undo"))
542 transaction.rollback(self.sopener, self.sjoin("undo"))
543 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
543 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
544 self.invalidate()
544 self.invalidate()
545 self.dirstate.invalidate()
545 self.dirstate.invalidate()
546 else:
546 else:
547 self.ui.warn(_("no rollback information available\n"))
547 self.ui.warn(_("no rollback information available\n"))
548 finally:
548 finally:
549 del lock, wlock
549 del lock, wlock
550
550
551 def invalidate(self):
551 def invalidate(self):
552 for a in "changelog manifest".split():
552 for a in "changelog manifest".split():
553 if hasattr(self, a):
553 if hasattr(self, a):
554 self.__delattr__(a)
554 self.__delattr__(a)
555 self.tagscache = None
555 self.tagscache = None
556 self.nodetagscache = None
556 self.nodetagscache = None
557
557
558 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
558 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
559 try:
559 try:
560 l = lock.lock(lockname, 0, releasefn, desc=desc)
560 l = lock.lock(lockname, 0, releasefn, desc=desc)
561 except lock.LockHeld, inst:
561 except lock.LockHeld, inst:
562 if not wait:
562 if not wait:
563 raise
563 raise
564 self.ui.warn(_("waiting for lock on %s held by %r\n") %
564 self.ui.warn(_("waiting for lock on %s held by %r\n") %
565 (desc, inst.locker))
565 (desc, inst.locker))
566 # default to 600 seconds timeout
566 # default to 600 seconds timeout
567 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
567 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
568 releasefn, desc=desc)
568 releasefn, desc=desc)
569 if acquirefn:
569 if acquirefn:
570 acquirefn()
570 acquirefn()
571 return l
571 return l
572
572
573 def lock(self, wait=True):
573 def lock(self, wait=True):
574 if self._lockref and self._lockref():
574 if self._lockref and self._lockref():
575 return self._lockref()
575 return self._lockref()
576
576
577 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
577 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
578 _('repository %s') % self.origroot)
578 _('repository %s') % self.origroot)
579 self._lockref = weakref.ref(l)
579 self._lockref = weakref.ref(l)
580 return l
580 return l
581
581
582 def wlock(self, wait=True):
582 def wlock(self, wait=True):
583 if self._wlockref and self._wlockref():
583 if self._wlockref and self._wlockref():
584 return self._wlockref()
584 return self._wlockref()
585
585
586 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
586 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
587 self.dirstate.invalidate, _('working directory of %s') %
587 self.dirstate.invalidate, _('working directory of %s') %
588 self.origroot)
588 self.origroot)
589 self._wlockref = weakref.ref(l)
589 self._wlockref = weakref.ref(l)
590 return l
590 return l
591
591
592 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
592 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
593 """
593 """
594 commit an individual file as part of a larger transaction
594 commit an individual file as part of a larger transaction
595 """
595 """
596
596
597 t = self.wread(fn)
597 t = self.wread(fn)
598 fl = self.file(fn)
598 fl = self.file(fn)
599 fp1 = manifest1.get(fn, nullid)
599 fp1 = manifest1.get(fn, nullid)
600 fp2 = manifest2.get(fn, nullid)
600 fp2 = manifest2.get(fn, nullid)
601
601
602 meta = {}
602 meta = {}
603 cp = self.dirstate.copied(fn)
603 cp = self.dirstate.copied(fn)
604 if cp:
604 if cp:
605 # Mark the new revision of this file as a copy of another
605 # Mark the new revision of this file as a copy of another
606 # file. This copy data will effectively act as a parent
606 # file. This copy data will effectively act as a parent
607 # of this new revision. If this is a merge, the first
607 # of this new revision. If this is a merge, the first
608 # parent will be the nullid (meaning "look up the copy data")
608 # parent will be the nullid (meaning "look up the copy data")
609 # and the second one will be the other parent. For example:
609 # and the second one will be the other parent. For example:
610 #
610 #
611 # 0 --- 1 --- 3 rev1 changes file foo
611 # 0 --- 1 --- 3 rev1 changes file foo
612 # \ / rev2 renames foo to bar and changes it
612 # \ / rev2 renames foo to bar and changes it
613 # \- 2 -/ rev3 should have bar with all changes and
613 # \- 2 -/ rev3 should have bar with all changes and
614 # should record that bar descends from
614 # should record that bar descends from
615 # bar in rev2 and foo in rev1
615 # bar in rev2 and foo in rev1
616 #
616 #
617 # this allows this merge to succeed:
617 # this allows this merge to succeed:
618 #
618 #
619 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
619 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
620 # \ / merging rev3 and rev4 should use bar@rev2
620 # \ / merging rev3 and rev4 should use bar@rev2
621 # \- 2 --- 4 as the merge base
621 # \- 2 --- 4 as the merge base
622 #
622 #
623 meta["copy"] = cp
623 meta["copy"] = cp
624 if not manifest2: # not a branch merge
624 if not manifest2: # not a branch merge
625 meta["copyrev"] = hex(manifest1.get(cp, nullid))
625 meta["copyrev"] = hex(manifest1.get(cp, nullid))
626 fp2 = nullid
626 fp2 = nullid
627 elif fp2 != nullid: # copied on remote side
627 elif fp2 != nullid: # copied on remote side
628 meta["copyrev"] = hex(manifest1.get(cp, nullid))
628 meta["copyrev"] = hex(manifest1.get(cp, nullid))
629 elif fp1 != nullid: # copied on local side, reversed
629 elif fp1 != nullid: # copied on local side, reversed
630 meta["copyrev"] = hex(manifest2.get(cp))
630 meta["copyrev"] = hex(manifest2.get(cp))
631 fp2 = fp1
631 fp2 = fp1
632 elif cp in manifest2: # directory rename on local side
632 elif cp in manifest2: # directory rename on local side
633 meta["copyrev"] = hex(manifest2[cp])
633 meta["copyrev"] = hex(manifest2[cp])
634 else: # directory rename on remote side
634 else: # directory rename on remote side
635 meta["copyrev"] = hex(manifest1.get(cp, nullid))
635 meta["copyrev"] = hex(manifest1.get(cp, nullid))
636 self.ui.debug(_(" %s: copy %s:%s\n") %
636 self.ui.debug(_(" %s: copy %s:%s\n") %
637 (fn, cp, meta["copyrev"]))
637 (fn, cp, meta["copyrev"]))
638 fp1 = nullid
638 fp1 = nullid
639 elif fp2 != nullid:
639 elif fp2 != nullid:
640 # is one parent an ancestor of the other?
640 # is one parent an ancestor of the other?
641 fpa = fl.ancestor(fp1, fp2)
641 fpa = fl.ancestor(fp1, fp2)
642 if fpa == fp1:
642 if fpa == fp1:
643 fp1, fp2 = fp2, nullid
643 fp1, fp2 = fp2, nullid
644 elif fpa == fp2:
644 elif fpa == fp2:
645 fp2 = nullid
645 fp2 = nullid
646
646
647 # is the file unmodified from the parent? report existing entry
647 # is the file unmodified from the parent? report existing entry
648 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
648 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
649 return fp1
649 return fp1
650
650
651 changelist.append(fn)
651 changelist.append(fn)
652 return fl.add(t, meta, tr, linkrev, fp1, fp2)
652 return fl.add(t, meta, tr, linkrev, fp1, fp2)
653
653
654 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
654 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
655 if p1 is None:
655 if p1 is None:
656 p1, p2 = self.dirstate.parents()
656 p1, p2 = self.dirstate.parents()
657 return self.commit(files=files, text=text, user=user, date=date,
657 return self.commit(files=files, text=text, user=user, date=date,
658 p1=p1, p2=p2, extra=extra, empty_ok=True)
658 p1=p1, p2=p2, extra=extra, empty_ok=True)
659
659
660 def commit(self, files=None, text="", user=None, date=None,
660 def commit(self, files=None, text="", user=None, date=None,
661 match=util.always, force=False, force_editor=False,
661 match=util.always, force=False, force_editor=False,
662 p1=None, p2=None, extra={}, empty_ok=False):
662 p1=None, p2=None, extra={}, empty_ok=False):
663 wlock = lock = tr = None
663 wlock = lock = tr = None
664 if files:
665 files = util.unique(files)
664 try:
666 try:
665 commit = []
667 commit = []
666 remove = []
668 remove = []
667 changed = []
669 changed = []
668 use_dirstate = (p1 is None) # not rawcommit
670 use_dirstate = (p1 is None) # not rawcommit
669 extra = extra.copy()
671 extra = extra.copy()
670
672
671 if use_dirstate:
673 if use_dirstate:
672 if files:
674 if files:
673 for f in files:
675 for f in files:
674 s = self.dirstate[f]
676 s = self.dirstate[f]
675 if s in 'nma':
677 if s in 'nma':
676 commit.append(f)
678 commit.append(f)
677 elif s == 'r':
679 elif s == 'r':
678 remove.append(f)
680 remove.append(f)
679 else:
681 else:
680 self.ui.warn(_("%s not tracked!\n") % f)
682 self.ui.warn(_("%s not tracked!\n") % f)
681 else:
683 else:
682 changes = self.status(match=match)[:5]
684 changes = self.status(match=match)[:5]
683 modified, added, removed, deleted, unknown = changes
685 modified, added, removed, deleted, unknown = changes
684 commit = modified + added
686 commit = modified + added
685 remove = removed
687 remove = removed
686 else:
688 else:
687 commit = files
689 commit = files
688
690
689 if use_dirstate:
691 if use_dirstate:
690 p1, p2 = self.dirstate.parents()
692 p1, p2 = self.dirstate.parents()
691 update_dirstate = True
693 update_dirstate = True
692 else:
694 else:
693 p1, p2 = p1, p2 or nullid
695 p1, p2 = p1, p2 or nullid
694 update_dirstate = (self.dirstate.parents()[0] == p1)
696 update_dirstate = (self.dirstate.parents()[0] == p1)
695
697
696 c1 = self.changelog.read(p1)
698 c1 = self.changelog.read(p1)
697 c2 = self.changelog.read(p2)
699 c2 = self.changelog.read(p2)
698 m1 = self.manifest.read(c1[0]).copy()
700 m1 = self.manifest.read(c1[0]).copy()
699 m2 = self.manifest.read(c2[0])
701 m2 = self.manifest.read(c2[0])
700
702
701 if use_dirstate:
703 if use_dirstate:
702 branchname = self.workingctx().branch()
704 branchname = self.workingctx().branch()
703 try:
705 try:
704 branchname = branchname.decode('UTF-8').encode('UTF-8')
706 branchname = branchname.decode('UTF-8').encode('UTF-8')
705 except UnicodeDecodeError:
707 except UnicodeDecodeError:
706 raise util.Abort(_('branch name not in UTF-8!'))
708 raise util.Abort(_('branch name not in UTF-8!'))
707 else:
709 else:
708 branchname = ""
710 branchname = ""
709
711
710 if use_dirstate:
712 if use_dirstate:
711 oldname = c1[5].get("branch") # stored in UTF-8
713 oldname = c1[5].get("branch") # stored in UTF-8
712 if (not commit and not remove and not force and p2 == nullid
714 if (not commit and not remove and not force and p2 == nullid
713 and branchname == oldname):
715 and branchname == oldname):
714 self.ui.status(_("nothing changed\n"))
716 self.ui.status(_("nothing changed\n"))
715 return None
717 return None
716
718
717 xp1 = hex(p1)
719 xp1 = hex(p1)
718 if p2 == nullid: xp2 = ''
720 if p2 == nullid: xp2 = ''
719 else: xp2 = hex(p2)
721 else: xp2 = hex(p2)
720
722
721 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
723 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
722
724
723 wlock = self.wlock()
725 wlock = self.wlock()
724 lock = self.lock()
726 lock = self.lock()
725 tr = self.transaction()
727 tr = self.transaction()
726 trp = weakref.proxy(tr)
728 trp = weakref.proxy(tr)
727
729
728 # check in files
730 # check in files
729 new = {}
731 new = {}
730 linkrev = self.changelog.count()
732 linkrev = self.changelog.count()
731 commit.sort()
733 commit.sort()
732 is_exec = util.execfunc(self.root, m1.execf)
734 is_exec = util.execfunc(self.root, m1.execf)
733 is_link = util.linkfunc(self.root, m1.linkf)
735 is_link = util.linkfunc(self.root, m1.linkf)
734 for f in commit:
736 for f in commit:
735 self.ui.note(f + "\n")
737 self.ui.note(f + "\n")
736 try:
738 try:
737 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
739 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
738 new_exec = is_exec(f)
740 new_exec = is_exec(f)
739 new_link = is_link(f)
741 new_link = is_link(f)
740 if ((not changed or changed[-1] != f) and
742 if ((not changed or changed[-1] != f) and
741 m2.get(f) != new[f]):
743 m2.get(f) != new[f]):
742 # mention the file in the changelog if some
744 # mention the file in the changelog if some
743 # flag changed, even if there was no content
745 # flag changed, even if there was no content
744 # change.
746 # change.
745 old_exec = m1.execf(f)
747 old_exec = m1.execf(f)
746 old_link = m1.linkf(f)
748 old_link = m1.linkf(f)
747 if old_exec != new_exec or old_link != new_link:
749 if old_exec != new_exec or old_link != new_link:
748 changed.append(f)
750 changed.append(f)
749 m1.set(f, new_exec, new_link)
751 m1.set(f, new_exec, new_link)
750 except (OSError, IOError):
752 except (OSError, IOError):
751 if use_dirstate:
753 if use_dirstate:
752 self.ui.warn(_("trouble committing %s!\n") % f)
754 self.ui.warn(_("trouble committing %s!\n") % f)
753 raise
755 raise
754 else:
756 else:
755 remove.append(f)
757 remove.append(f)
756
758
757 # update manifest
759 # update manifest
758 m1.update(new)
760 m1.update(new)
759 remove.sort()
761 remove.sort()
760 removed = []
762 removed = []
761
763
762 for f in remove:
764 for f in remove:
763 if f in m1:
765 if f in m1:
764 del m1[f]
766 del m1[f]
765 removed.append(f)
767 removed.append(f)
766 elif f in m2:
768 elif f in m2:
767 removed.append(f)
769 removed.append(f)
768 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
770 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
769 (new, removed))
771 (new, removed))
770
772
771 # add changeset
773 # add changeset
772 new = new.keys()
774 new = new.keys()
773 new.sort()
775 new.sort()
774
776
775 user = user or self.ui.username()
777 user = user or self.ui.username()
776 if (not empty_ok and not text) or force_editor:
778 if (not empty_ok and not text) or force_editor:
777 edittext = []
779 edittext = []
778 if text:
780 if text:
779 edittext.append(text)
781 edittext.append(text)
780 edittext.append("")
782 edittext.append("")
781 edittext.append("HG: user: %s" % user)
783 edittext.append("HG: user: %s" % user)
782 if p2 != nullid:
784 if p2 != nullid:
783 edittext.append("HG: branch merge")
785 edittext.append("HG: branch merge")
784 if branchname:
786 if branchname:
785 edittext.append("HG: branch %s" % util.tolocal(branchname))
787 edittext.append("HG: branch %s" % util.tolocal(branchname))
786 edittext.extend(["HG: changed %s" % f for f in changed])
788 edittext.extend(["HG: changed %s" % f for f in changed])
787 edittext.extend(["HG: removed %s" % f for f in removed])
789 edittext.extend(["HG: removed %s" % f for f in removed])
788 if not changed and not remove:
790 if not changed and not remove:
789 edittext.append("HG: no files changed")
791 edittext.append("HG: no files changed")
790 edittext.append("")
792 edittext.append("")
791 # run editor in the repository root
793 # run editor in the repository root
792 olddir = os.getcwd()
794 olddir = os.getcwd()
793 os.chdir(self.root)
795 os.chdir(self.root)
794 text = self.ui.edit("\n".join(edittext), user)
796 text = self.ui.edit("\n".join(edittext), user)
795 os.chdir(olddir)
797 os.chdir(olddir)
796
798
797 if branchname:
799 if branchname:
798 extra["branch"] = branchname
800 extra["branch"] = branchname
799
801
800 if use_dirstate:
802 if use_dirstate:
801 lines = [line.rstrip() for line in text.rstrip().splitlines()]
803 lines = [line.rstrip() for line in text.rstrip().splitlines()]
802 while lines and not lines[0]:
804 while lines and not lines[0]:
803 del lines[0]
805 del lines[0]
804 if not lines:
806 if not lines:
805 return None
807 return None
806 text = '\n'.join(lines)
808 text = '\n'.join(lines)
807
809
808 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
810 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
809 user, date, extra)
811 user, date, extra)
810 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
812 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
811 parent2=xp2)
813 parent2=xp2)
812 tr.close()
814 tr.close()
813
815
814 if self.branchcache and "branch" in extra:
816 if self.branchcache and "branch" in extra:
815 self.branchcache[util.tolocal(extra["branch"])] = n
817 self.branchcache[util.tolocal(extra["branch"])] = n
816
818
817 if use_dirstate or update_dirstate:
819 if use_dirstate or update_dirstate:
818 self.dirstate.setparents(n)
820 self.dirstate.setparents(n)
819 if use_dirstate:
821 if use_dirstate:
820 for f in new:
822 for f in new:
821 self.dirstate.normal(f)
823 self.dirstate.normal(f)
822 for f in removed:
824 for f in removed:
823 self.dirstate.forget(f)
825 self.dirstate.forget(f)
824
826
825 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
827 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
826 return n
828 return n
827 finally:
829 finally:
828 del tr, lock, wlock
830 del tr, lock, wlock
829
831
830 def walk(self, node=None, files=[], match=util.always, badmatch=None):
832 def walk(self, node=None, files=[], match=util.always, badmatch=None):
831 '''
833 '''
832 walk recursively through the directory tree or a given
834 walk recursively through the directory tree or a given
833 changeset, finding all files matched by the match
835 changeset, finding all files matched by the match
834 function
836 function
835
837
836 results are yielded in a tuple (src, filename), where src
838 results are yielded in a tuple (src, filename), where src
837 is one of:
839 is one of:
838 'f' the file was found in the directory tree
840 'f' the file was found in the directory tree
839 'm' the file was only in the dirstate and not in the tree
841 'm' the file was only in the dirstate and not in the tree
840 'b' file was not found and matched badmatch
842 'b' file was not found and matched badmatch
841 '''
843 '''
842
844
843 if node:
845 if node:
844 fdict = dict.fromkeys(files)
846 fdict = dict.fromkeys(files)
845 # for dirstate.walk, files=['.'] means "walk the whole tree".
847 # for dirstate.walk, files=['.'] means "walk the whole tree".
846 # follow that here, too
848 # follow that here, too
847 fdict.pop('.', None)
849 fdict.pop('.', None)
848 mdict = self.manifest.read(self.changelog.read(node)[0])
850 mdict = self.manifest.read(self.changelog.read(node)[0])
849 mfiles = mdict.keys()
851 mfiles = mdict.keys()
850 mfiles.sort()
852 mfiles.sort()
851 for fn in mfiles:
853 for fn in mfiles:
852 for ffn in fdict:
854 for ffn in fdict:
853 # match if the file is the exact name or a directory
855 # match if the file is the exact name or a directory
854 if ffn == fn or fn.startswith("%s/" % ffn):
856 if ffn == fn or fn.startswith("%s/" % ffn):
855 del fdict[ffn]
857 del fdict[ffn]
856 break
858 break
857 if match(fn):
859 if match(fn):
858 yield 'm', fn
860 yield 'm', fn
859 ffiles = fdict.keys()
861 ffiles = fdict.keys()
860 ffiles.sort()
862 ffiles.sort()
861 for fn in ffiles:
863 for fn in ffiles:
862 if badmatch and badmatch(fn):
864 if badmatch and badmatch(fn):
863 if match(fn):
865 if match(fn):
864 yield 'b', fn
866 yield 'b', fn
865 else:
867 else:
866 self.ui.warn(_('%s: No such file in rev %s\n')
868 self.ui.warn(_('%s: No such file in rev %s\n')
867 % (self.pathto(fn), short(node)))
869 % (self.pathto(fn), short(node)))
868 else:
870 else:
869 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
871 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 yield src, fn
872 yield src, fn
871
873
872 def status(self, node1=None, node2=None, files=[], match=util.always,
874 def status(self, node1=None, node2=None, files=[], match=util.always,
873 list_ignored=False, list_clean=False):
875 list_ignored=False, list_clean=False):
874 """return status of files between two nodes or node and working directory
876 """return status of files between two nodes or node and working directory
875
877
876 If node1 is None, use the first dirstate parent instead.
878 If node1 is None, use the first dirstate parent instead.
877 If node2 is None, compare node1 with working directory.
879 If node2 is None, compare node1 with working directory.
878 """
880 """
879
881
880 def fcmp(fn, getnode):
882 def fcmp(fn, getnode):
881 t1 = self.wread(fn)
883 t1 = self.wread(fn)
882 return self.file(fn).cmp(getnode(fn), t1)
884 return self.file(fn).cmp(getnode(fn), t1)
883
885
884 def mfmatches(node):
886 def mfmatches(node):
885 change = self.changelog.read(node)
887 change = self.changelog.read(node)
886 mf = self.manifest.read(change[0]).copy()
888 mf = self.manifest.read(change[0]).copy()
887 for fn in mf.keys():
889 for fn in mf.keys():
888 if not match(fn):
890 if not match(fn):
889 del mf[fn]
891 del mf[fn]
890 return mf
892 return mf
891
893
892 modified, added, removed, deleted, unknown = [], [], [], [], []
894 modified, added, removed, deleted, unknown = [], [], [], [], []
893 ignored, clean = [], []
895 ignored, clean = [], []
894
896
895 compareworking = False
897 compareworking = False
896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
898 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 compareworking = True
899 compareworking = True
898
900
899 if not compareworking:
901 if not compareworking:
900 # read the manifest from node1 before the manifest from node2,
902 # read the manifest from node1 before the manifest from node2,
901 # so that we'll hit the manifest cache if we're going through
903 # so that we'll hit the manifest cache if we're going through
902 # all the revisions in parent->child order.
904 # all the revisions in parent->child order.
903 mf1 = mfmatches(node1)
905 mf1 = mfmatches(node1)
904
906
905 # are we comparing the working directory?
907 # are we comparing the working directory?
906 if not node2:
908 if not node2:
907 (lookup, modified, added, removed, deleted, unknown,
909 (lookup, modified, added, removed, deleted, unknown,
908 ignored, clean) = self.dirstate.status(files, match,
910 ignored, clean) = self.dirstate.status(files, match,
909 list_ignored, list_clean)
911 list_ignored, list_clean)
910
912
911 # are we comparing working dir against its parent?
913 # are we comparing working dir against its parent?
912 if compareworking:
914 if compareworking:
913 if lookup:
915 if lookup:
914 fixup = []
916 fixup = []
915 # do a full compare of any files that might have changed
917 # do a full compare of any files that might have changed
916 ctx = self.changectx()
918 ctx = self.changectx()
917 for f in lookup:
919 for f in lookup:
918 if f not in ctx or ctx[f].cmp(self.wread(f)):
920 if f not in ctx or ctx[f].cmp(self.wread(f)):
919 modified.append(f)
921 modified.append(f)
920 else:
922 else:
921 fixup.append(f)
923 fixup.append(f)
922 if list_clean:
924 if list_clean:
923 clean.append(f)
925 clean.append(f)
924
926
925 # update dirstate for files that are actually clean
927 # update dirstate for files that are actually clean
926 if fixup:
928 if fixup:
927 wlock = None
929 wlock = None
928 try:
930 try:
929 try:
931 try:
930 wlock = self.wlock(False)
932 wlock = self.wlock(False)
931 except lock.LockException:
933 except lock.LockException:
932 pass
934 pass
933 if wlock:
935 if wlock:
934 for f in fixup:
936 for f in fixup:
935 self.dirstate.normal(f)
937 self.dirstate.normal(f)
936 finally:
938 finally:
937 del wlock
939 del wlock
938 else:
940 else:
939 # we are comparing working dir against non-parent
941 # we are comparing working dir against non-parent
940 # generate a pseudo-manifest for the working dir
942 # generate a pseudo-manifest for the working dir
941 # XXX: create it in dirstate.py ?
943 # XXX: create it in dirstate.py ?
942 mf2 = mfmatches(self.dirstate.parents()[0])
944 mf2 = mfmatches(self.dirstate.parents()[0])
943 is_exec = util.execfunc(self.root, mf2.execf)
945 is_exec = util.execfunc(self.root, mf2.execf)
944 is_link = util.linkfunc(self.root, mf2.linkf)
946 is_link = util.linkfunc(self.root, mf2.linkf)
945 for f in lookup + modified + added:
947 for f in lookup + modified + added:
946 mf2[f] = ""
948 mf2[f] = ""
947 mf2.set(f, is_exec(f), is_link(f))
949 mf2.set(f, is_exec(f), is_link(f))
948 for f in removed:
950 for f in removed:
949 if f in mf2:
951 if f in mf2:
950 del mf2[f]
952 del mf2[f]
951
953
952 else:
954 else:
953 # we are comparing two revisions
955 # we are comparing two revisions
954 mf2 = mfmatches(node2)
956 mf2 = mfmatches(node2)
955
957
956 if not compareworking:
958 if not compareworking:
957 # flush lists from dirstate before comparing manifests
959 # flush lists from dirstate before comparing manifests
958 modified, added, clean = [], [], []
960 modified, added, clean = [], [], []
959
961
960 # make sure to sort the files so we talk to the disk in a
962 # make sure to sort the files so we talk to the disk in a
961 # reasonable order
963 # reasonable order
962 mf2keys = mf2.keys()
964 mf2keys = mf2.keys()
963 mf2keys.sort()
965 mf2keys.sort()
964 getnode = lambda fn: mf1.get(fn, nullid)
966 getnode = lambda fn: mf1.get(fn, nullid)
965 for fn in mf2keys:
967 for fn in mf2keys:
966 if mf1.has_key(fn):
968 if mf1.has_key(fn):
967 if (mf1.flags(fn) != mf2.flags(fn) or
969 if (mf1.flags(fn) != mf2.flags(fn) or
968 (mf1[fn] != mf2[fn] and
970 (mf1[fn] != mf2[fn] and
969 (mf2[fn] != "" or fcmp(fn, getnode)))):
971 (mf2[fn] != "" or fcmp(fn, getnode)))):
970 modified.append(fn)
972 modified.append(fn)
971 elif list_clean:
973 elif list_clean:
972 clean.append(fn)
974 clean.append(fn)
973 del mf1[fn]
975 del mf1[fn]
974 else:
976 else:
975 added.append(fn)
977 added.append(fn)
976
978
977 removed = mf1.keys()
979 removed = mf1.keys()
978
980
979 # sort and return results:
981 # sort and return results:
980 for l in modified, added, removed, deleted, unknown, ignored, clean:
982 for l in modified, added, removed, deleted, unknown, ignored, clean:
981 l.sort()
983 l.sort()
982 return (modified, added, removed, deleted, unknown, ignored, clean)
984 return (modified, added, removed, deleted, unknown, ignored, clean)
983
985
984 def add(self, list):
986 def add(self, list):
985 wlock = self.wlock()
987 wlock = self.wlock()
986 try:
988 try:
987 for f in list:
989 for f in list:
988 p = self.wjoin(f)
990 p = self.wjoin(f)
989 try:
991 try:
990 st = os.lstat(p)
992 st = os.lstat(p)
991 except:
993 except:
992 self.ui.warn(_("%s does not exist!\n") % f)
994 self.ui.warn(_("%s does not exist!\n") % f)
993 continue
995 continue
994 if st.st_size > 10000000:
996 if st.st_size > 10000000:
995 self.ui.warn(_("%s: files over 10MB may cause memory and"
997 self.ui.warn(_("%s: files over 10MB may cause memory and"
996 " performance problems\n"
998 " performance problems\n"
997 "(use 'hg revert %s' to unadd the file)\n")
999 "(use 'hg revert %s' to unadd the file)\n")
998 % (f, f))
1000 % (f, f))
999 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1001 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1000 self.ui.warn(_("%s not added: only files and symlinks "
1002 self.ui.warn(_("%s not added: only files and symlinks "
1001 "supported currently\n") % f)
1003 "supported currently\n") % f)
1002 elif self.dirstate[f] in 'amn':
1004 elif self.dirstate[f] in 'amn':
1003 self.ui.warn(_("%s already tracked!\n") % f)
1005 self.ui.warn(_("%s already tracked!\n") % f)
1004 elif self.dirstate[f] == 'r':
1006 elif self.dirstate[f] == 'r':
1005 self.dirstate.normallookup(f)
1007 self.dirstate.normallookup(f)
1006 else:
1008 else:
1007 self.dirstate.add(f)
1009 self.dirstate.add(f)
1008 finally:
1010 finally:
1009 del wlock
1011 del wlock
1010
1012
1011 def forget(self, list):
1013 def forget(self, list):
1012 wlock = self.wlock()
1014 wlock = self.wlock()
1013 try:
1015 try:
1014 for f in list:
1016 for f in list:
1015 if self.dirstate[f] != 'a':
1017 if self.dirstate[f] != 'a':
1016 self.ui.warn(_("%s not added!\n") % f)
1018 self.ui.warn(_("%s not added!\n") % f)
1017 else:
1019 else:
1018 self.dirstate.forget(f)
1020 self.dirstate.forget(f)
1019 finally:
1021 finally:
1020 del wlock
1022 del wlock
1021
1023
1022 def remove(self, list, unlink=False):
1024 def remove(self, list, unlink=False):
1023 wlock = None
1025 wlock = None
1024 try:
1026 try:
1025 if unlink:
1027 if unlink:
1026 for f in list:
1028 for f in list:
1027 try:
1029 try:
1028 util.unlink(self.wjoin(f))
1030 util.unlink(self.wjoin(f))
1029 except OSError, inst:
1031 except OSError, inst:
1030 if inst.errno != errno.ENOENT:
1032 if inst.errno != errno.ENOENT:
1031 raise
1033 raise
1032 wlock = self.wlock()
1034 wlock = self.wlock()
1033 for f in list:
1035 for f in list:
1034 if unlink and os.path.exists(self.wjoin(f)):
1036 if unlink and os.path.exists(self.wjoin(f)):
1035 self.ui.warn(_("%s still exists!\n") % f)
1037 self.ui.warn(_("%s still exists!\n") % f)
1036 elif self.dirstate[f] == 'a':
1038 elif self.dirstate[f] == 'a':
1037 self.dirstate.forget(f)
1039 self.dirstate.forget(f)
1038 elif f not in self.dirstate:
1040 elif f not in self.dirstate:
1039 self.ui.warn(_("%s not tracked!\n") % f)
1041 self.ui.warn(_("%s not tracked!\n") % f)
1040 else:
1042 else:
1041 self.dirstate.remove(f)
1043 self.dirstate.remove(f)
1042 finally:
1044 finally:
1043 del wlock
1045 del wlock
1044
1046
1045 def undelete(self, list):
1047 def undelete(self, list):
1046 wlock = None
1048 wlock = None
1047 try:
1049 try:
1048 manifests = [self.manifest.read(self.changelog.read(p)[0])
1050 manifests = [self.manifest.read(self.changelog.read(p)[0])
1049 for p in self.dirstate.parents() if p != nullid]
1051 for p in self.dirstate.parents() if p != nullid]
1050 wlock = self.wlock()
1052 wlock = self.wlock()
1051 for f in list:
1053 for f in list:
1052 if self.dirstate[f] != 'r':
1054 if self.dirstate[f] != 'r':
1053 self.ui.warn("%s not removed!\n" % f)
1055 self.ui.warn("%s not removed!\n" % f)
1054 else:
1056 else:
1055 m = f in manifests[0] and manifests[0] or manifests[1]
1057 m = f in manifests[0] and manifests[0] or manifests[1]
1056 t = self.file(f).read(m[f])
1058 t = self.file(f).read(m[f])
1057 self.wwrite(f, t, m.flags(f))
1059 self.wwrite(f, t, m.flags(f))
1058 self.dirstate.normal(f)
1060 self.dirstate.normal(f)
1059 finally:
1061 finally:
1060 del wlock
1062 del wlock
1061
1063
1062 def copy(self, source, dest):
1064 def copy(self, source, dest):
1063 wlock = None
1065 wlock = None
1064 try:
1066 try:
1065 p = self.wjoin(dest)
1067 p = self.wjoin(dest)
1066 if not (os.path.exists(p) or os.path.islink(p)):
1068 if not (os.path.exists(p) or os.path.islink(p)):
1067 self.ui.warn(_("%s does not exist!\n") % dest)
1069 self.ui.warn(_("%s does not exist!\n") % dest)
1068 elif not (os.path.isfile(p) or os.path.islink(p)):
1070 elif not (os.path.isfile(p) or os.path.islink(p)):
1069 self.ui.warn(_("copy failed: %s is not a file or a "
1071 self.ui.warn(_("copy failed: %s is not a file or a "
1070 "symbolic link\n") % dest)
1072 "symbolic link\n") % dest)
1071 else:
1073 else:
1072 wlock = self.wlock()
1074 wlock = self.wlock()
1073 if dest not in self.dirstate:
1075 if dest not in self.dirstate:
1074 self.dirstate.add(dest)
1076 self.dirstate.add(dest)
1075 self.dirstate.copy(source, dest)
1077 self.dirstate.copy(source, dest)
1076 finally:
1078 finally:
1077 del wlock
1079 del wlock
1078
1080
1079 def heads(self, start=None):
1081 def heads(self, start=None):
1080 heads = self.changelog.heads(start)
1082 heads = self.changelog.heads(start)
1081 # sort the output in rev descending order
1083 # sort the output in rev descending order
1082 heads = [(-self.changelog.rev(h), h) for h in heads]
1084 heads = [(-self.changelog.rev(h), h) for h in heads]
1083 heads.sort()
1085 heads.sort()
1084 return [n for (r, n) in heads]
1086 return [n for (r, n) in heads]
1085
1087
def branchheads(self, branch, start=None):
    """Return the list of head nodes belonging to the named branch.

    'branch' is a branch name as found in branchtags(); an unknown
    branch yields [].  If 'start' is given, only heads reachable from
    that node are returned.
    """
    branches = self.branchtags()
    if branch not in branches:
        return []
    # The basic algorithm is this:
    #
    # Start from the branch tip since there are no later revisions that can
    # possibly be in this branch, and the tip is a guaranteed head.
    #
    # Remember the tip's parents as the first ancestors, since these by
    # definition are not heads.
    #
    # Step backwards from the branch tip through all the revisions. We are
    # guaranteed by the rules of Mercurial that we will now be visiting the
    # nodes in reverse topological order (children before parents).
    #
    # If a revision is one of the ancestors of a head then we can toss it
    # out of the ancestors set (we've already found it and won't be
    # visiting it again) and put its parents in the ancestors set.
    #
    # Otherwise, if a revision is in the branch it's another head, since it
    # wasn't in the ancestor list of an existing head. So add it to the
    # head list, and add its parents to the ancestor list.
    #
    # If it is not in the branch ignore it.
    #
    # Once we have a list of heads, use nodesbetween to filter out all the
    # heads that cannot be reached from startrev. There may be a more
    # efficient way to do this as part of the previous algorithm.

    # util.set: the set type supplied by util (presumably a fallback for
    # old Pythons without the builtin -- confirm against util module)
    set = util.set
    heads = [self.changelog.rev(branches[branch])]
    # Don't care if ancestors contains nullrev or not.
    ancestors = set(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            # ancestor of an already-found head: not a head itself,
            # chase its parents instead
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self.changectx(rev).branch() == branch:
            # in-branch revision nobody claimed as an ancestor: new head
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    # convert collected revision numbers back to nodes
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        # keep only the heads reachable from 'start'
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1131
1133
def branches(self, nodes):
    """For each given node, follow first parents down to the root of its
    linear segment and return a (head, root, parent1, parent2) tuple.

    An empty node list defaults to the changelog tip.
    """
    cl = self.changelog
    if not nodes:
        nodes = [cl.tip()]
    result = []
    for head in nodes:
        cur = head
        while True:
            p1, p2 = cl.parents(cur)
            # stop at a merge (real second parent) or at a root changeset
            if p2 != nullid or p1 == nullid:
                result.append((head, cur, p1, p2))
                break
            cur = p1
    return result
1145
1147
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top toward
    bottom and collect the nodes found at exponentially spaced steps
    (1, 2, 4, 8, ...).  Returns one list of sample nodes per pair.
    """
    result = []
    for top, bottom in pairs:
        node = top
        sample = []
        step = 0
        nextmark = 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            # record the node whenever we hit the next power-of-two step
            if step == nextmark:
                sample.append(node)
                nextmark *= 2
            node = parent
            step += 1
        result.append(sample)
    return result
1164
1166
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # identity test: 'is None' is the correct way to detect a missing arg
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is incoming
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            # batch further branch queries in groups of 10 nodes
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                        (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # f is a full binary node: pass it to short() whole instead of
            # truncating it to 4 bytes, which garbled the reported hash
            raise repo.RepoError(_("already have changeset ") + short(f))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1305
1307
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # identity test: 'is None' is the correct way to detect a missing arg
    if base is None:
        base = {}
        # populate 'base' with the common nodes via findincoming
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1353
1355
def pull(self, remote, heads=None, force=False):
    """Fetch the changesets missing locally from 'remote' and add them.

    Returns 0 when there is nothing to pull, otherwise the result of
    addchangegroup.  'heads' restricts the pull to the given heads and
    requires the remote to support changegroupsubset.
    """
    lock = self.lock()
    try:
        fetch = self.findincoming(remote, heads=heads, force=force)
        if fetch == [nullid]:
            # empty local repo: everything is incoming
            self.ui.status(_("requesting all changes\n"))
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0
        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif 'changegroupsubset' not in remote.capabilities:
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        del lock
1374
1376
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to 'remote'.

    Two transports exist: addchangegroup assumes the local user can
    lock the remote repo (local filesystem, old ssh servers), while
    unbundle works without holding a remote lock (new ssh servers,
    http servers).  Prefer unbundle whenever the remote supports it.
    """
    if not remote.capable('unbundle'):
        return self.push_addchangegroup(remote, force, revs)
    return self.push_unbundle(remote, force, revs)
1387
1389
def prepush(self, remote, force, revs):
    """Compute the changegroup to push to 'remote'.

    Returns (changegroup, remote_heads) when there is something to
    push, or (None, status) when there is nothing to push or the push
    was refused because it would create new remote heads and 'force'
    is not set.
    """
    base = {}
    remote_heads = remote.heads()
    # inc: non-empty when the remote has changesets we do not have
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        # restrict the outgoing set to ancestors of the requested revs
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # remote repo is empty: any push is safe
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # no outgoing head descends from r, so r
                        # remains a head after the push
                        newheads.append(r)
                else:
                    # remote head unknown locally: it stays a head
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        # forced push with unsynced remote changes: just note it
        self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1443
1445
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and feeding it a changegroup.

    Used for transports where the local user can lock the remote
    repository (local filesystem, old ssh servers).
    """
    lock = remote.lock()
    try:
        cg, remote_heads = self.prepush(remote, force, revs)
        if cg is None:
            # nothing to push (or push refused); the second element of
            # prepush's result is the status code in that case
            return remote_heads
        return remote.addchangegroup(cg, 'push', self.url())
    finally:
        del lock
1454
1456
def push_unbundle(self, remote, force, revs):
    """Push without taking a remote lock.

    The local repo finds the server's heads and computes what revs it
    must push; once the revs are transferred, the server aborts if its
    heads changed meanwhile (someone else won a commit/push race).
    With 'force' the race check is disabled by sending the sentinel
    heads list ['force'].
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push (or push refused): second element is status
        return remote_heads
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1467
1469
def changegroupinfo(self, nodes):
    """Report how many changesets a changegroup will contain; in debug
    mode also list them, one hex node per line."""
    ui = self.ui
    ui.note(_("%d changesets found\n") % len(nodes))
    if not ui.debugflag:
        return
    ui.debug(_("List of changesets:\n"))
    for node in nodes:
        ui.debug("%s\n" % hex(node))
1474
1476
1475 def changegroupsubset(self, bases, heads, source):
1477 def changegroupsubset(self, bases, heads, source):
1476 """This function generates a changegroup consisting of all the nodes
1478 """This function generates a changegroup consisting of all the nodes
1477 that are descendents of any of the bases, and ancestors of any of
1479 that are descendents of any of the bases, and ancestors of any of
1478 the heads.
1480 the heads.
1479
1481
1480 It is fairly complex as determining which filenodes and which
1482 It is fairly complex as determining which filenodes and which
1481 manifest nodes need to be included for the changeset to be complete
1483 manifest nodes need to be included for the changeset to be complete
1482 is non-trivial.
1484 is non-trivial.
1483
1485
1484 Another wrinkle is doing the reverse, figuring out which changeset in
1486 Another wrinkle is doing the reverse, figuring out which changeset in
1485 the changegroup a particular filenode or manifestnode belongs to."""
1487 the changegroup a particular filenode or manifestnode belongs to."""
1486
1488
1487 self.hook('preoutgoing', throw=True, source=source)
1489 self.hook('preoutgoing', throw=True, source=source)
1488
1490
1489 # Set up some initial variables
1491 # Set up some initial variables
1490 # Make it easy to refer to self.changelog
1492 # Make it easy to refer to self.changelog
1491 cl = self.changelog
1493 cl = self.changelog
1492 # msng is short for missing - compute the list of changesets in this
1494 # msng is short for missing - compute the list of changesets in this
1493 # changegroup.
1495 # changegroup.
1494 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1496 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1495 self.changegroupinfo(msng_cl_lst)
1497 self.changegroupinfo(msng_cl_lst)
1496 # Some bases may turn out to be superfluous, and some heads may be
1498 # Some bases may turn out to be superfluous, and some heads may be
1497 # too. nodesbetween will return the minimal set of bases and heads
1499 # too. nodesbetween will return the minimal set of bases and heads
1498 # necessary to re-create the changegroup.
1500 # necessary to re-create the changegroup.
1499
1501
1500 # Known heads are the list of heads that it is assumed the recipient
1502 # Known heads are the list of heads that it is assumed the recipient
1501 # of this changegroup will know about.
1503 # of this changegroup will know about.
1502 knownheads = {}
1504 knownheads = {}
1503 # We assume that all parents of bases are known heads.
1505 # We assume that all parents of bases are known heads.
1504 for n in bases:
1506 for n in bases:
1505 for p in cl.parents(n):
1507 for p in cl.parents(n):
1506 if p != nullid:
1508 if p != nullid:
1507 knownheads[p] = 1
1509 knownheads[p] = 1
1508 knownheads = knownheads.keys()
1510 knownheads = knownheads.keys()
1509 if knownheads:
1511 if knownheads:
1510 # Now that we know what heads are known, we can compute which
1512 # Now that we know what heads are known, we can compute which
1511 # changesets are known. The recipient must know about all
1513 # changesets are known. The recipient must know about all
1512 # changesets required to reach the known heads from the null
1514 # changesets required to reach the known heads from the null
1513 # changeset.
1515 # changeset.
1514 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1516 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1515 junk = None
1517 junk = None
1516 # Transform the list into an ersatz set.
1518 # Transform the list into an ersatz set.
1517 has_cl_set = dict.fromkeys(has_cl_set)
1519 has_cl_set = dict.fromkeys(has_cl_set)
1518 else:
1520 else:
1519 # If there were no known heads, the recipient cannot be assumed to
1521 # If there were no known heads, the recipient cannot be assumed to
1520 # know about any changesets.
1522 # know about any changesets.
1521 has_cl_set = {}
1523 has_cl_set = {}
1522
1524
1523 # Make it easy to refer to self.manifest
1525 # Make it easy to refer to self.manifest
1524 mnfst = self.manifest
1526 mnfst = self.manifest
1525 # We don't know which manifests are missing yet
1527 # We don't know which manifests are missing yet
1526 msng_mnfst_set = {}
1528 msng_mnfst_set = {}
1527 # Nor do we know which filenodes are missing.
1529 # Nor do we know which filenodes are missing.
1528 msng_filenode_set = {}
1530 msng_filenode_set = {}
1529
1531
1530 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1532 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1531 junk = None
1533 junk = None
1532
1534
1533 # A changeset always belongs to itself, so the changenode lookup
1535 # A changeset always belongs to itself, so the changenode lookup
1534 # function for a changenode is identity.
1536 # function for a changenode is identity.
1535 def identity(x):
1537 def identity(x):
1536 return x
1538 return x
1537
1539
1538 # A function generating function. Sets up an environment for the
1540 # A function generating function. Sets up an environment for the
1539 # inner function.
1541 # inner function.
1540 def cmp_by_rev_func(revlog):
1542 def cmp_by_rev_func(revlog):
1541 # Compare two nodes by their revision number in the environment's
1543 # Compare two nodes by their revision number in the environment's
1542 # revision history. Since the revision number both represents the
1544 # revision history. Since the revision number both represents the
1543 # most efficient order to read the nodes in, and represents a
1545 # most efficient order to read the nodes in, and represents a
1544 # topological sorting of the nodes, this function is often useful.
1546 # topological sorting of the nodes, this function is often useful.
1545 def cmp_by_rev(a, b):
1547 def cmp_by_rev(a, b):
1546 return cmp(revlog.rev(a), revlog.rev(b))
1548 return cmp(revlog.rev(a), revlog.rev(b))
1547 return cmp_by_rev
1549 return cmp_by_rev
1548
1550
1549 # If we determine that a particular file or manifest node must be a
1551 # If we determine that a particular file or manifest node must be a
1550 # node that the recipient of the changegroup will already have, we can
1552 # node that the recipient of the changegroup will already have, we can
1551 # also assume the recipient will have all the parents. This function
1553 # also assume the recipient will have all the parents. This function
1552 # prunes them from the set of missing nodes.
1554 # prunes them from the set of missing nodes.
1553 def prune_parents(revlog, hasset, msngset):
1555 def prune_parents(revlog, hasset, msngset):
1554 haslst = hasset.keys()
1556 haslst = hasset.keys()
1555 haslst.sort(cmp_by_rev_func(revlog))
1557 haslst.sort(cmp_by_rev_func(revlog))
1556 for node in haslst:
1558 for node in haslst:
1557 parentlst = [p for p in revlog.parents(node) if p != nullid]
1559 parentlst = [p for p in revlog.parents(node) if p != nullid]
1558 while parentlst:
1560 while parentlst:
1559 n = parentlst.pop()
1561 n = parentlst.pop()
1560 if n not in hasset:
1562 if n not in hasset:
1561 hasset[n] = 1
1563 hasset[n] = 1
1562 p = [p for p in revlog.parents(n) if p != nullid]
1564 p = [p for p in revlog.parents(n) if p != nullid]
1563 parentlst.extend(p)
1565 parentlst.extend(p)
1564 for n in hasset:
1566 for n in hasset:
1565 msngset.pop(n, None)
1567 msngset.pop(n, None)
1566
1568
1567 # This is a function generating function used to set up an environment
1569 # This is a function generating function used to set up an environment
1568 # for the inner function to execute in.
1570 # for the inner function to execute in.
1569 def manifest_and_file_collector(changedfileset):
1571 def manifest_and_file_collector(changedfileset):
1570 # This is an information gathering function that gathers
1572 # This is an information gathering function that gathers
1571 # information from each changeset node that goes out as part of
1573 # information from each changeset node that goes out as part of
1572 # the changegroup. The information gathered is a list of which
1574 # the changegroup. The information gathered is a list of which
1573 # manifest nodes are potentially required (the recipient may
1575 # manifest nodes are potentially required (the recipient may
1574 # already have them) and total list of all files which were
1576 # already have them) and total list of all files which were
1575 # changed in any changeset in the changegroup.
1577 # changed in any changeset in the changegroup.
1576 #
1578 #
1577 # We also remember the first changenode we saw any manifest
1579 # We also remember the first changenode we saw any manifest
1578 # referenced by so we can later determine which changenode 'owns'
1580 # referenced by so we can later determine which changenode 'owns'
1579 # the manifest.
1581 # the manifest.
1580 def collect_manifests_and_files(clnode):
1582 def collect_manifests_and_files(clnode):
1581 c = cl.read(clnode)
1583 c = cl.read(clnode)
1582 for f in c[3]:
1584 for f in c[3]:
1583 # This is to make sure we only have one instance of each
1585 # This is to make sure we only have one instance of each
1584 # filename string for each filename.
1586 # filename string for each filename.
1585 changedfileset.setdefault(f, f)
1587 changedfileset.setdefault(f, f)
1586 msng_mnfst_set.setdefault(c[0], clnode)
1588 msng_mnfst_set.setdefault(c[0], clnode)
1587 return collect_manifests_and_files
1589 return collect_manifests_and_files
1588
1590
1589 # Figure out which manifest nodes (of the ones we think might be part
1591 # Figure out which manifest nodes (of the ones we think might be part
1590 # of the changegroup) the recipient must know about and remove them
1592 # of the changegroup) the recipient must know about and remove them
1591 # from the changegroup.
1593 # from the changegroup.
1592 def prune_manifests():
1594 def prune_manifests():
1593 has_mnfst_set = {}
1595 has_mnfst_set = {}
1594 for n in msng_mnfst_set:
1596 for n in msng_mnfst_set:
1595 # If a 'missing' manifest thinks it belongs to a changenode
1597 # If a 'missing' manifest thinks it belongs to a changenode
1596 # the recipient is assumed to have, obviously the recipient
1598 # the recipient is assumed to have, obviously the recipient
1597 # must have that manifest.
1599 # must have that manifest.
1598 linknode = cl.node(mnfst.linkrev(n))
1600 linknode = cl.node(mnfst.linkrev(n))
1599 if linknode in has_cl_set:
1601 if linknode in has_cl_set:
1600 has_mnfst_set[n] = 1
1602 has_mnfst_set[n] = 1
1601 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1603 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1602
1604
1603 # Use the information collected in collect_manifests_and_files to say
1605 # Use the information collected in collect_manifests_and_files to say
1604 # which changenode any manifestnode belongs to.
1606 # which changenode any manifestnode belongs to.
1605 def lookup_manifest_link(mnfstnode):
1607 def lookup_manifest_link(mnfstnode):
1606 return msng_mnfst_set[mnfstnode]
1608 return msng_mnfst_set[mnfstnode]
1607
1609
1608 # A function generating function that sets up the initial environment
1610 # A function generating function that sets up the initial environment
1609 # the inner function.
1611 # the inner function.
1610 def filenode_collector(changedfiles):
1612 def filenode_collector(changedfiles):
1611 next_rev = [0]
1613 next_rev = [0]
1612 # This gathers information from each manifestnode included in the
1614 # This gathers information from each manifestnode included in the
1613 # changegroup about which filenodes the manifest node references
1615 # changegroup about which filenodes the manifest node references
1614 # so we can include those in the changegroup too.
1616 # so we can include those in the changegroup too.
1615 #
1617 #
1616 # It also remembers which changenode each filenode belongs to. It
1618 # It also remembers which changenode each filenode belongs to. It
1617 # does this by assuming the a filenode belongs to the changenode
1619 # does this by assuming the a filenode belongs to the changenode
1618 # the first manifest that references it belongs to.
1620 # the first manifest that references it belongs to.
1619 def collect_msng_filenodes(mnfstnode):
1621 def collect_msng_filenodes(mnfstnode):
1620 r = mnfst.rev(mnfstnode)
1622 r = mnfst.rev(mnfstnode)
1621 if r == next_rev[0]:
1623 if r == next_rev[0]:
1622 # If the last rev we looked at was the one just previous,
1624 # If the last rev we looked at was the one just previous,
1623 # we only need to see a diff.
1625 # we only need to see a diff.
1624 deltamf = mnfst.readdelta(mnfstnode)
1626 deltamf = mnfst.readdelta(mnfstnode)
1625 # For each line in the delta
1627 # For each line in the delta
1626 for f, fnode in deltamf.items():
1628 for f, fnode in deltamf.items():
1627 f = changedfiles.get(f, None)
1629 f = changedfiles.get(f, None)
1628 # And if the file is in the list of files we care
1630 # And if the file is in the list of files we care
1629 # about.
1631 # about.
1630 if f is not None:
1632 if f is not None:
1631 # Get the changenode this manifest belongs to
1633 # Get the changenode this manifest belongs to
1632 clnode = msng_mnfst_set[mnfstnode]
1634 clnode = msng_mnfst_set[mnfstnode]
1633 # Create the set of filenodes for the file if
1635 # Create the set of filenodes for the file if
1634 # there isn't one already.
1636 # there isn't one already.
1635 ndset = msng_filenode_set.setdefault(f, {})
1637 ndset = msng_filenode_set.setdefault(f, {})
1636 # And set the filenode's changelog node to the
1638 # And set the filenode's changelog node to the
1637 # manifest's if it hasn't been set already.
1639 # manifest's if it hasn't been set already.
1638 ndset.setdefault(fnode, clnode)
1640 ndset.setdefault(fnode, clnode)
1639 else:
1641 else:
1640 # Otherwise we need a full manifest.
1642 # Otherwise we need a full manifest.
1641 m = mnfst.read(mnfstnode)
1643 m = mnfst.read(mnfstnode)
1642 # For every file in we care about.
1644 # For every file in we care about.
1643 for f in changedfiles:
1645 for f in changedfiles:
1644 fnode = m.get(f, None)
1646 fnode = m.get(f, None)
1645 # If it's in the manifest
1647 # If it's in the manifest
1646 if fnode is not None:
1648 if fnode is not None:
1647 # See comments above.
1649 # See comments above.
1648 clnode = msng_mnfst_set[mnfstnode]
1650 clnode = msng_mnfst_set[mnfstnode]
1649 ndset = msng_filenode_set.setdefault(f, {})
1651 ndset = msng_filenode_set.setdefault(f, {})
1650 ndset.setdefault(fnode, clnode)
1652 ndset.setdefault(fnode, clnode)
1651 # Remember the revision we hope to see next.
1653 # Remember the revision we hope to see next.
1652 next_rev[0] = r + 1
1654 next_rev[0] = r + 1
1653 return collect_msng_filenodes
1655 return collect_msng_filenodes
1654
1656
1655 # We have a list of filenodes we think we need for a file, lets remove
1657 # We have a list of filenodes we think we need for a file, lets remove
1656 # all those we now the recipient must have.
1658 # all those we now the recipient must have.
1657 def prune_filenodes(f, filerevlog):
1659 def prune_filenodes(f, filerevlog):
1658 msngset = msng_filenode_set[f]
1660 msngset = msng_filenode_set[f]
1659 hasset = {}
1661 hasset = {}
1660 # If a 'missing' filenode thinks it belongs to a changenode we
1662 # If a 'missing' filenode thinks it belongs to a changenode we
1661 # assume the recipient must have, then the recipient must have
1663 # assume the recipient must have, then the recipient must have
1662 # that filenode.
1664 # that filenode.
1663 for n in msngset:
1665 for n in msngset:
1664 clnode = cl.node(filerevlog.linkrev(n))
1666 clnode = cl.node(filerevlog.linkrev(n))
1665 if clnode in has_cl_set:
1667 if clnode in has_cl_set:
1666 hasset[n] = 1
1668 hasset[n] = 1
1667 prune_parents(filerevlog, hasset, msngset)
1669 prune_parents(filerevlog, hasset, msngset)
1668
1670
1669 # A function generator function that sets up the a context for the
1671 # A function generator function that sets up the a context for the
1670 # inner function.
1672 # inner function.
1671 def lookup_filenode_link_func(fname):
1673 def lookup_filenode_link_func(fname):
1672 msngset = msng_filenode_set[fname]
1674 msngset = msng_filenode_set[fname]
1673 # Lookup the changenode the filenode belongs to.
1675 # Lookup the changenode the filenode belongs to.
1674 def lookup_filenode_link(fnode):
1676 def lookup_filenode_link(fnode):
1675 return msngset[fnode]
1677 return msngset[fnode]
1676 return lookup_filenode_link
1678 return lookup_filenode_link
1677
1679
1678 # Now that we have all theses utility functions to help out and
1680 # Now that we have all theses utility functions to help out and
1679 # logically divide up the task, generate the group.
1681 # logically divide up the task, generate the group.
1680 def gengroup():
1682 def gengroup():
1681 # The set of changed files starts empty.
1683 # The set of changed files starts empty.
1682 changedfiles = {}
1684 changedfiles = {}
1683 # Create a changenode group generator that will call our functions
1685 # Create a changenode group generator that will call our functions
1684 # back to lookup the owning changenode and collect information.
1686 # back to lookup the owning changenode and collect information.
1685 group = cl.group(msng_cl_lst, identity,
1687 group = cl.group(msng_cl_lst, identity,
1686 manifest_and_file_collector(changedfiles))
1688 manifest_and_file_collector(changedfiles))
1687 for chnk in group:
1689 for chnk in group:
1688 yield chnk
1690 yield chnk
1689
1691
1690 # The list of manifests has been collected by the generator
1692 # The list of manifests has been collected by the generator
1691 # calling our functions back.
1693 # calling our functions back.
1692 prune_manifests()
1694 prune_manifests()
1693 msng_mnfst_lst = msng_mnfst_set.keys()
1695 msng_mnfst_lst = msng_mnfst_set.keys()
1694 # Sort the manifestnodes by revision number.
1696 # Sort the manifestnodes by revision number.
1695 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1697 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1696 # Create a generator for the manifestnodes that calls our lookup
1698 # Create a generator for the manifestnodes that calls our lookup
1697 # and data collection functions back.
1699 # and data collection functions back.
1698 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1700 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1699 filenode_collector(changedfiles))
1701 filenode_collector(changedfiles))
1700 for chnk in group:
1702 for chnk in group:
1701 yield chnk
1703 yield chnk
1702
1704
1703 # These are no longer needed, dereference and toss the memory for
1705 # These are no longer needed, dereference and toss the memory for
1704 # them.
1706 # them.
1705 msng_mnfst_lst = None
1707 msng_mnfst_lst = None
1706 msng_mnfst_set.clear()
1708 msng_mnfst_set.clear()
1707
1709
1708 changedfiles = changedfiles.keys()
1710 changedfiles = changedfiles.keys()
1709 changedfiles.sort()
1711 changedfiles.sort()
1710 # Go through all our files in order sorted by name.
1712 # Go through all our files in order sorted by name.
1711 for fname in changedfiles:
1713 for fname in changedfiles:
1712 filerevlog = self.file(fname)
1714 filerevlog = self.file(fname)
1713 # Toss out the filenodes that the recipient isn't really
1715 # Toss out the filenodes that the recipient isn't really
1714 # missing.
1716 # missing.
1715 if msng_filenode_set.has_key(fname):
1717 if msng_filenode_set.has_key(fname):
1716 prune_filenodes(fname, filerevlog)
1718 prune_filenodes(fname, filerevlog)
1717 msng_filenode_lst = msng_filenode_set[fname].keys()
1719 msng_filenode_lst = msng_filenode_set[fname].keys()
1718 else:
1720 else:
1719 msng_filenode_lst = []
1721 msng_filenode_lst = []
1720 # If any filenodes are left, generate the group for them,
1722 # If any filenodes are left, generate the group for them,
1721 # otherwise don't bother.
1723 # otherwise don't bother.
1722 if len(msng_filenode_lst) > 0:
1724 if len(msng_filenode_lst) > 0:
1723 yield changegroup.chunkheader(len(fname))
1725 yield changegroup.chunkheader(len(fname))
1724 yield fname
1726 yield fname
1725 # Sort the filenodes by their revision #
1727 # Sort the filenodes by their revision #
1726 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1728 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1727 # Create a group generator and only pass in a changenode
1729 # Create a group generator and only pass in a changenode
1728 # lookup function as we need to collect no information
1730 # lookup function as we need to collect no information
1729 # from filenodes.
1731 # from filenodes.
1730 group = filerevlog.group(msng_filenode_lst,
1732 group = filerevlog.group(msng_filenode_lst,
1731 lookup_filenode_link_func(fname))
1733 lookup_filenode_link_func(fname))
1732 for chnk in group:
1734 for chnk in group:
1733 yield chnk
1735 yield chnk
1734 if msng_filenode_set.has_key(fname):
1736 if msng_filenode_set.has_key(fname):
1735 # Don't need this anymore, toss it to free memory.
1737 # Don't need this anymore, toss it to free memory.
1736 del msng_filenode_set[fname]
1738 del msng_filenode_set[fname]
1737 # Signal that no more groups are left.
1739 # Signal that no more groups are left.
1738 yield changegroup.closechunk()
1740 yield changegroup.closechunk()
1739
1741
1740 if msng_cl_lst:
1742 if msng_cl_lst:
1741 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1743 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1742
1744
1743 return util.chunkbuffer(gengroup())
1745 return util.chunkbuffer(gengroup())
1744
1746
1745 def changegroup(self, basenodes, source):
1747 def changegroup(self, basenodes, source):
1746 """Generate a changegroup of all nodes that we have that a recipient
1748 """Generate a changegroup of all nodes that we have that a recipient
1747 doesn't.
1749 doesn't.
1748
1750
1749 This is much easier than the previous function as we can assume that
1751 This is much easier than the previous function as we can assume that
1750 the recipient has any changenode we aren't sending them."""
1752 the recipient has any changenode we aren't sending them."""
1751
1753
1752 self.hook('preoutgoing', throw=True, source=source)
1754 self.hook('preoutgoing', throw=True, source=source)
1753
1755
1754 cl = self.changelog
1756 cl = self.changelog
1755 nodes = cl.nodesbetween(basenodes, None)[0]
1757 nodes = cl.nodesbetween(basenodes, None)[0]
1756 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1758 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1757 self.changegroupinfo(nodes)
1759 self.changegroupinfo(nodes)
1758
1760
1759 def identity(x):
1761 def identity(x):
1760 return x
1762 return x
1761
1763
1762 def gennodelst(revlog):
1764 def gennodelst(revlog):
1763 for r in xrange(0, revlog.count()):
1765 for r in xrange(0, revlog.count()):
1764 n = revlog.node(r)
1766 n = revlog.node(r)
1765 if revlog.linkrev(n) in revset:
1767 if revlog.linkrev(n) in revset:
1766 yield n
1768 yield n
1767
1769
1768 def changed_file_collector(changedfileset):
1770 def changed_file_collector(changedfileset):
1769 def collect_changed_files(clnode):
1771 def collect_changed_files(clnode):
1770 c = cl.read(clnode)
1772 c = cl.read(clnode)
1771 for fname in c[3]:
1773 for fname in c[3]:
1772 changedfileset[fname] = 1
1774 changedfileset[fname] = 1
1773 return collect_changed_files
1775 return collect_changed_files
1774
1776
1775 def lookuprevlink_func(revlog):
1777 def lookuprevlink_func(revlog):
1776 def lookuprevlink(n):
1778 def lookuprevlink(n):
1777 return cl.node(revlog.linkrev(n))
1779 return cl.node(revlog.linkrev(n))
1778 return lookuprevlink
1780 return lookuprevlink
1779
1781
1780 def gengroup():
1782 def gengroup():
1781 # construct a list of all changed files
1783 # construct a list of all changed files
1782 changedfiles = {}
1784 changedfiles = {}
1783
1785
1784 for chnk in cl.group(nodes, identity,
1786 for chnk in cl.group(nodes, identity,
1785 changed_file_collector(changedfiles)):
1787 changed_file_collector(changedfiles)):
1786 yield chnk
1788 yield chnk
1787 changedfiles = changedfiles.keys()
1789 changedfiles = changedfiles.keys()
1788 changedfiles.sort()
1790 changedfiles.sort()
1789
1791
1790 mnfst = self.manifest
1792 mnfst = self.manifest
1791 nodeiter = gennodelst(mnfst)
1793 nodeiter = gennodelst(mnfst)
1792 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1794 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1793 yield chnk
1795 yield chnk
1794
1796
1795 for fname in changedfiles:
1797 for fname in changedfiles:
1796 filerevlog = self.file(fname)
1798 filerevlog = self.file(fname)
1797 nodeiter = gennodelst(filerevlog)
1799 nodeiter = gennodelst(filerevlog)
1798 nodeiter = list(nodeiter)
1800 nodeiter = list(nodeiter)
1799 if nodeiter:
1801 if nodeiter:
1800 yield changegroup.chunkheader(len(fname))
1802 yield changegroup.chunkheader(len(fname))
1801 yield fname
1803 yield fname
1802 lookup = lookuprevlink_func(filerevlog)
1804 lookup = lookuprevlink_func(filerevlog)
1803 for chnk in filerevlog.group(nodeiter, lookup):
1805 for chnk in filerevlog.group(nodeiter, lookup):
1804 yield chnk
1806 yield chnk
1805
1807
1806 yield changegroup.closechunk()
1808 yield changegroup.closechunk()
1807
1809
1808 if nodes:
1810 if nodes:
1809 self.hook('outgoing', node=hex(nodes[0]), source=source)
1811 self.hook('outgoing', node=hex(nodes[0]), source=source)
1810
1812
1811 return util.chunkbuffer(gengroup())
1813 return util.chunkbuffer(gengroup())
1812
1814
1813 def addchangegroup(self, source, srctype, url):
1815 def addchangegroup(self, source, srctype, url):
1814 """add changegroup to repo.
1816 """add changegroup to repo.
1815
1817
1816 return values:
1818 return values:
1817 - nothing changed or no source: 0
1819 - nothing changed or no source: 0
1818 - more heads than before: 1+added heads (2..n)
1820 - more heads than before: 1+added heads (2..n)
1819 - less heads than before: -1-removed heads (-2..-n)
1821 - less heads than before: -1-removed heads (-2..-n)
1820 - number of heads stays the same: 1
1822 - number of heads stays the same: 1
1821 """
1823 """
1822 def csmap(x):
1824 def csmap(x):
1823 self.ui.debug(_("add changeset %s\n") % short(x))
1825 self.ui.debug(_("add changeset %s\n") % short(x))
1824 return cl.count()
1826 return cl.count()
1825
1827
1826 def revmap(x):
1828 def revmap(x):
1827 return cl.rev(x)
1829 return cl.rev(x)
1828
1830
1829 if not source:
1831 if not source:
1830 return 0
1832 return 0
1831
1833
1832 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1834 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1833
1835
1834 changesets = files = revisions = 0
1836 changesets = files = revisions = 0
1835
1837
1836 # write changelog data to temp files so concurrent readers will not see
1838 # write changelog data to temp files so concurrent readers will not see
1837 # inconsistent view
1839 # inconsistent view
1838 cl = self.changelog
1840 cl = self.changelog
1839 cl.delayupdate()
1841 cl.delayupdate()
1840 oldheads = len(cl.heads())
1842 oldheads = len(cl.heads())
1841
1843
1842 tr = self.transaction()
1844 tr = self.transaction()
1843 try:
1845 try:
1844 trp = weakref.proxy(tr)
1846 trp = weakref.proxy(tr)
1845 # pull off the changeset group
1847 # pull off the changeset group
1846 self.ui.status(_("adding changesets\n"))
1848 self.ui.status(_("adding changesets\n"))
1847 cor = cl.count() - 1
1849 cor = cl.count() - 1
1848 chunkiter = changegroup.chunkiter(source)
1850 chunkiter = changegroup.chunkiter(source)
1849 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1851 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1850 raise util.Abort(_("received changelog group is empty"))
1852 raise util.Abort(_("received changelog group is empty"))
1851 cnr = cl.count() - 1
1853 cnr = cl.count() - 1
1852 changesets = cnr - cor
1854 changesets = cnr - cor
1853
1855
1854 # pull off the manifest group
1856 # pull off the manifest group
1855 self.ui.status(_("adding manifests\n"))
1857 self.ui.status(_("adding manifests\n"))
1856 chunkiter = changegroup.chunkiter(source)
1858 chunkiter = changegroup.chunkiter(source)
1857 # no need to check for empty manifest group here:
1859 # no need to check for empty manifest group here:
1858 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1860 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1859 # no new manifest will be created and the manifest group will
1861 # no new manifest will be created and the manifest group will
1860 # be empty during the pull
1862 # be empty during the pull
1861 self.manifest.addgroup(chunkiter, revmap, trp)
1863 self.manifest.addgroup(chunkiter, revmap, trp)
1862
1864
1863 # process the files
1865 # process the files
1864 self.ui.status(_("adding file changes\n"))
1866 self.ui.status(_("adding file changes\n"))
1865 while 1:
1867 while 1:
1866 f = changegroup.getchunk(source)
1868 f = changegroup.getchunk(source)
1867 if not f:
1869 if not f:
1868 break
1870 break
1869 self.ui.debug(_("adding %s revisions\n") % f)
1871 self.ui.debug(_("adding %s revisions\n") % f)
1870 fl = self.file(f)
1872 fl = self.file(f)
1871 o = fl.count()
1873 o = fl.count()
1872 chunkiter = changegroup.chunkiter(source)
1874 chunkiter = changegroup.chunkiter(source)
1873 if fl.addgroup(chunkiter, revmap, trp) is None:
1875 if fl.addgroup(chunkiter, revmap, trp) is None:
1874 raise util.Abort(_("received file revlog group is empty"))
1876 raise util.Abort(_("received file revlog group is empty"))
1875 revisions += fl.count() - o
1877 revisions += fl.count() - o
1876 files += 1
1878 files += 1
1877
1879
1878 # make changelog see real files again
1880 # make changelog see real files again
1879 cl.finalize(trp)
1881 cl.finalize(trp)
1880
1882
1881 newheads = len(self.changelog.heads())
1883 newheads = len(self.changelog.heads())
1882 heads = ""
1884 heads = ""
1883 if oldheads and newheads != oldheads:
1885 if oldheads and newheads != oldheads:
1884 heads = _(" (%+d heads)") % (newheads - oldheads)
1886 heads = _(" (%+d heads)") % (newheads - oldheads)
1885
1887
1886 self.ui.status(_("added %d changesets"
1888 self.ui.status(_("added %d changesets"
1887 " with %d changes to %d files%s\n")
1889 " with %d changes to %d files%s\n")
1888 % (changesets, revisions, files, heads))
1890 % (changesets, revisions, files, heads))
1889
1891
1890 if changesets > 0:
1892 if changesets > 0:
1891 self.hook('pretxnchangegroup', throw=True,
1893 self.hook('pretxnchangegroup', throw=True,
1892 node=hex(self.changelog.node(cor+1)), source=srctype,
1894 node=hex(self.changelog.node(cor+1)), source=srctype,
1893 url=url)
1895 url=url)
1894
1896
1895 tr.close()
1897 tr.close()
1896 finally:
1898 finally:
1897 del tr
1899 del tr
1898
1900
1899 if changesets > 0:
1901 if changesets > 0:
1900 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1902 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1901 source=srctype, url=url)
1903 source=srctype, url=url)
1902
1904
1903 for i in xrange(cor + 1, cnr + 1):
1905 for i in xrange(cor + 1, cnr + 1):
1904 self.hook("incoming", node=hex(self.changelog.node(i)),
1906 self.hook("incoming", node=hex(self.changelog.node(i)),
1905 source=srctype, url=url)
1907 source=srctype, url=url)
1906
1908
1907 # never return 0 here:
1909 # never return 0 here:
1908 if newheads < oldheads:
1910 if newheads < oldheads:
1909 return newheads - oldheads - 1
1911 return newheads - oldheads - 1
1910 else:
1912 else:
1911 return newheads - oldheads + 1
1913 return newheads - oldheads + 1
1912
1914
1913
1915
1914 def stream_in(self, remote):
1916 def stream_in(self, remote):
1915 fp = remote.stream_out()
1917 fp = remote.stream_out()
1916 l = fp.readline()
1918 l = fp.readline()
1917 try:
1919 try:
1918 resp = int(l)
1920 resp = int(l)
1919 except ValueError:
1921 except ValueError:
1920 raise util.UnexpectedOutput(
1922 raise util.UnexpectedOutput(
1921 _('Unexpected response from remote server:'), l)
1923 _('Unexpected response from remote server:'), l)
1922 if resp == 1:
1924 if resp == 1:
1923 raise util.Abort(_('operation forbidden by server'))
1925 raise util.Abort(_('operation forbidden by server'))
1924 elif resp == 2:
1926 elif resp == 2:
1925 raise util.Abort(_('locking the remote repository failed'))
1927 raise util.Abort(_('locking the remote repository failed'))
1926 elif resp != 0:
1928 elif resp != 0:
1927 raise util.Abort(_('the server sent an unknown error code'))
1929 raise util.Abort(_('the server sent an unknown error code'))
1928 self.ui.status(_('streaming all changes\n'))
1930 self.ui.status(_('streaming all changes\n'))
1929 l = fp.readline()
1931 l = fp.readline()
1930 try:
1932 try:
1931 total_files, total_bytes = map(int, l.split(' ', 1))
1933 total_files, total_bytes = map(int, l.split(' ', 1))
1932 except ValueError, TypeError:
1934 except ValueError, TypeError:
1933 raise util.UnexpectedOutput(
1935 raise util.UnexpectedOutput(
1934 _('Unexpected response from remote server:'), l)
1936 _('Unexpected response from remote server:'), l)
1935 self.ui.status(_('%d files to transfer, %s of data\n') %
1937 self.ui.status(_('%d files to transfer, %s of data\n') %
1936 (total_files, util.bytecount(total_bytes)))
1938 (total_files, util.bytecount(total_bytes)))
1937 start = time.time()
1939 start = time.time()
1938 for i in xrange(total_files):
1940 for i in xrange(total_files):
1939 # XXX doesn't support '\n' or '\r' in filenames
1941 # XXX doesn't support '\n' or '\r' in filenames
1940 l = fp.readline()
1942 l = fp.readline()
1941 try:
1943 try:
1942 name, size = l.split('\0', 1)
1944 name, size = l.split('\0', 1)
1943 size = int(size)
1945 size = int(size)
1944 except ValueError, TypeError:
1946 except ValueError, TypeError:
1945 raise util.UnexpectedOutput(
1947 raise util.UnexpectedOutput(
1946 _('Unexpected response from remote server:'), l)
1948 _('Unexpected response from remote server:'), l)
1947 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1949 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1948 ofp = self.sopener(name, 'w')
1950 ofp = self.sopener(name, 'w')
1949 for chunk in util.filechunkiter(fp, limit=size):
1951 for chunk in util.filechunkiter(fp, limit=size):
1950 ofp.write(chunk)
1952 ofp.write(chunk)
1951 ofp.close()
1953 ofp.close()
1952 elapsed = time.time() - start
1954 elapsed = time.time() - start
1953 if elapsed <= 0:
1955 if elapsed <= 0:
1954 elapsed = 0.001
1956 elapsed = 0.001
1955 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1957 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1956 (util.bytecount(total_bytes), elapsed,
1958 (util.bytecount(total_bytes), elapsed,
1957 util.bytecount(total_bytes / elapsed)))
1959 util.bytecount(total_bytes / elapsed)))
1958 self.invalidate()
1960 self.invalidate()
1959 return len(self.heads()) + 1
1961 return len(self.heads()) + 1
1960
1962
1961 def clone(self, remote, heads=[], stream=False):
1963 def clone(self, remote, heads=[], stream=False):
1962 '''clone remote repository.
1964 '''clone remote repository.
1963
1965
1964 keyword arguments:
1966 keyword arguments:
1965 heads: list of revs to clone (forces use of pull)
1967 heads: list of revs to clone (forces use of pull)
1966 stream: use streaming clone if possible'''
1968 stream: use streaming clone if possible'''
1967
1969
1968 # now, all clients that can request uncompressed clones can
1970 # now, all clients that can request uncompressed clones can
1969 # read repo formats supported by all servers that can serve
1971 # read repo formats supported by all servers that can serve
1970 # them.
1972 # them.
1971
1973
1972 # if revlog format changes, client will have to check version
1974 # if revlog format changes, client will have to check version
1973 # and format flags on "stream" capability, and use
1975 # and format flags on "stream" capability, and use
1974 # uncompressed only if compatible.
1976 # uncompressed only if compatible.
1975
1977
1976 if stream and not heads and remote.capable('stream'):
1978 if stream and not heads and remote.capable('stream'):
1977 return self.stream_in(remote)
1979 return self.stream_in(remote)
1978 return self.pull(remote, heads)
1980 return self.pull(remote, heads)
1979
1981
1980 # used to avoid circular references so destructors work
1982 # used to avoid circular references so destructors work
1981 def aftertrans(files):
1983 def aftertrans(files):
1982 renamefiles = [tuple(t) for t in files]
1984 renamefiles = [tuple(t) for t in files]
1983 def a():
1985 def a():
1984 for src, dest in renamefiles:
1986 for src, dest in renamefiles:
1985 util.rename(src, dest)
1987 util.rename(src, dest)
1986 return a
1988 return a
1987
1989
1988 def instance(ui, path, create):
1990 def instance(ui, path, create):
1989 return localrepository(ui, util.drop_scheme('file', path), create)
1991 return localrepository(ui, util.drop_scheme('file', path), create)
1990
1992
1991 def islocal(path):
1993 def islocal(path):
1992 return True
1994 return True
General Comments 0
You need to be logged in to leave comments. Login now