##// END OF EJS Templates
rename and simplify do_lock
Matt Mackall -
r4913:46e39935 default
parent child Browse files
Show More
@@ -1,1952 +1,1950 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.path = path
23 self.path = path
24 self.root = os.path.realpath(path)
24 self.root = os.path.realpath(path)
25 self.path = os.path.join(self.root, ".hg")
25 self.path = os.path.join(self.root, ".hg")
26 self.origroot = path
26 self.origroot = path
27 self.opener = util.opener(self.path)
27 self.opener = util.opener(self.path)
28 self.wopener = util.opener(self.root)
28 self.wopener = util.opener(self.root)
29
29
30 if not os.path.isdir(self.path):
30 if not os.path.isdir(self.path):
31 if create:
31 if create:
32 if not os.path.exists(path):
32 if not os.path.exists(path):
33 os.mkdir(path)
33 os.mkdir(path)
34 os.mkdir(self.path)
34 os.mkdir(self.path)
35 requirements = ["revlogv1"]
35 requirements = ["revlogv1"]
36 if parentui.configbool('format', 'usestore', True):
36 if parentui.configbool('format', 'usestore', True):
37 os.mkdir(os.path.join(self.path, "store"))
37 os.mkdir(os.path.join(self.path, "store"))
38 requirements.append("store")
38 requirements.append("store")
39 # create an invalid changelog
39 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
40 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
41 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
42 ' dummy changelog to prevent using the old repo layout'
43 )
43 )
44 reqfile = self.opener("requires", "w")
44 reqfile = self.opener("requires", "w")
45 for r in requirements:
45 for r in requirements:
46 reqfile.write("%s\n" % r)
46 reqfile.write("%s\n" % r)
47 reqfile.close()
47 reqfile.close()
48 else:
48 else:
49 raise repo.RepoError(_("repository %s not found") % path)
49 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
50 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
51 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
52 else:
53 # find requirements
53 # find requirements
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 requirements = []
59 requirements = []
60 # check them
60 # check them
61 for r in requirements:
61 for r in requirements:
62 if r not in self.supported:
62 if r not in self.supported:
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64
64
65 # setup store
65 # setup store
66 if "store" in requirements:
66 if "store" in requirements:
67 self.encodefn = util.encodefilename
67 self.encodefn = util.encodefilename
68 self.decodefn = util.decodefilename
68 self.decodefn = util.decodefilename
69 self.spath = os.path.join(self.path, "store")
69 self.spath = os.path.join(self.path, "store")
70 else:
70 else:
71 self.encodefn = lambda x: x
71 self.encodefn = lambda x: x
72 self.decodefn = lambda x: x
72 self.decodefn = lambda x: x
73 self.spath = self.path
73 self.spath = self.path
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75
75
76 self.ui = ui.ui(parentui=parentui)
76 self.ui = ui.ui(parentui=parentui)
77 try:
77 try:
78 self.ui.readconfig(self.join("hgrc"), self.root)
78 self.ui.readconfig(self.join("hgrc"), self.root)
79 extensions.loadall(self.ui)
79 extensions.loadall(self.ui)
80 except IOError:
80 except IOError:
81 pass
81 pass
82
82
83 self.tagscache = None
83 self.tagscache = None
84 self.branchcache = None
84 self.branchcache = None
85 self.nodetagscache = None
85 self.nodetagscache = None
86 self.filterpats = {}
86 self.filterpats = {}
87 self.transhandle = None
87 self.transhandle = None
88
88
89 def __getattr__(self, name):
89 def __getattr__(self, name):
90 if name == 'changelog':
90 if name == 'changelog':
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 return self.changelog
93 return self.changelog
94 if name == 'manifest':
94 if name == 'manifest':
95 self.changelog
95 self.changelog
96 self.manifest = manifest.manifest(self.sopener)
96 self.manifest = manifest.manifest(self.sopener)
97 return self.manifest
97 return self.manifest
98 if name == 'dirstate':
98 if name == 'dirstate':
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 return self.dirstate
100 return self.dirstate
101 else:
101 else:
102 raise AttributeError, name
102 raise AttributeError, name
103
103
104 def url(self):
104 def url(self):
105 return 'file:' + self.root
105 return 'file:' + self.root
106
106
107 def hook(self, name, throw=False, **args):
107 def hook(self, name, throw=False, **args):
108 return hook.hook(self.ui, self, name, throw, **args)
108 return hook.hook(self.ui, self, name, throw, **args)
109
109
110 tag_disallowed = ':\r\n'
110 tag_disallowed = ':\r\n'
111
111
112 def _tag(self, name, node, message, local, user, date, parent=None,
112 def _tag(self, name, node, message, local, user, date, parent=None,
113 extra={}):
113 extra={}):
114 use_dirstate = parent is None
114 use_dirstate = parent is None
115
115
116 for c in self.tag_disallowed:
116 for c in self.tag_disallowed:
117 if c in name:
117 if c in name:
118 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119
119
120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121
121
122 def writetag(fp, name, munge, prevtags):
122 def writetag(fp, name, munge, prevtags):
123 if prevtags and prevtags[-1] != '\n':
123 if prevtags and prevtags[-1] != '\n':
124 fp.write('\n')
124 fp.write('\n')
125 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
126 fp.close()
126 fp.close()
127 self.hook('tag', node=hex(node), tag=name, local=local)
127 self.hook('tag', node=hex(node), tag=name, local=local)
128
128
129 prevtags = ''
129 prevtags = ''
130 if local:
130 if local:
131 try:
131 try:
132 fp = self.opener('localtags', 'r+')
132 fp = self.opener('localtags', 'r+')
133 except IOError, err:
133 except IOError, err:
134 fp = self.opener('localtags', 'a')
134 fp = self.opener('localtags', 'a')
135 else:
135 else:
136 prevtags = fp.read()
136 prevtags = fp.read()
137
137
138 # local tags are stored in the current charset
138 # local tags are stored in the current charset
139 writetag(fp, name, None, prevtags)
139 writetag(fp, name, None, prevtags)
140 return
140 return
141
141
142 if use_dirstate:
142 if use_dirstate:
143 try:
143 try:
144 fp = self.wfile('.hgtags', 'rb+')
144 fp = self.wfile('.hgtags', 'rb+')
145 except IOError, err:
145 except IOError, err:
146 fp = self.wfile('.hgtags', 'ab')
146 fp = self.wfile('.hgtags', 'ab')
147 else:
147 else:
148 prevtags = fp.read()
148 prevtags = fp.read()
149 else:
149 else:
150 try:
150 try:
151 prevtags = self.filectx('.hgtags', parent).data()
151 prevtags = self.filectx('.hgtags', parent).data()
152 except revlog.LookupError:
152 except revlog.LookupError:
153 pass
153 pass
154 fp = self.wfile('.hgtags', 'wb')
154 fp = self.wfile('.hgtags', 'wb')
155
155
156 # committed tags are stored in UTF-8
156 # committed tags are stored in UTF-8
157 writetag(fp, name, util.fromlocal, prevtags)
157 writetag(fp, name, util.fromlocal, prevtags)
158
158
159 if use_dirstate and '.hgtags' not in self.dirstate:
159 if use_dirstate and '.hgtags' not in self.dirstate:
160 self.add(['.hgtags'])
160 self.add(['.hgtags'])
161
161
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 extra=extra)
163 extra=extra)
164
164
165 self.hook('tag', node=hex(node), tag=name, local=local)
165 self.hook('tag', node=hex(node), tag=name, local=local)
166
166
167 return tagnode
167 return tagnode
168
168
169 def tag(self, name, node, message, local, user, date):
169 def tag(self, name, node, message, local, user, date):
170 '''tag a revision with a symbolic name.
170 '''tag a revision with a symbolic name.
171
171
172 if local is True, the tag is stored in a per-repository file.
172 if local is True, the tag is stored in a per-repository file.
173 otherwise, it is stored in the .hgtags file, and a new
173 otherwise, it is stored in the .hgtags file, and a new
174 changeset is committed with the change.
174 changeset is committed with the change.
175
175
176 keyword arguments:
176 keyword arguments:
177
177
178 local: whether to store tag in non-version-controlled file
178 local: whether to store tag in non-version-controlled file
179 (default False)
179 (default False)
180
180
181 message: commit message to use if committing
181 message: commit message to use if committing
182
182
183 user: name of user to use if committing
183 user: name of user to use if committing
184
184
185 date: date tuple to use if committing'''
185 date: date tuple to use if committing'''
186
186
187 for x in self.status()[:5]:
187 for x in self.status()[:5]:
188 if '.hgtags' in x:
188 if '.hgtags' in x:
189 raise util.Abort(_('working copy of .hgtags is changed '
189 raise util.Abort(_('working copy of .hgtags is changed '
190 '(please commit .hgtags manually)'))
190 '(please commit .hgtags manually)'))
191
191
192
192
193 self._tag(name, node, message, local, user, date)
193 self._tag(name, node, message, local, user, date)
194
194
195 def tags(self):
195 def tags(self):
196 '''return a mapping of tag to node'''
196 '''return a mapping of tag to node'''
197 if self.tagscache:
197 if self.tagscache:
198 return self.tagscache
198 return self.tagscache
199
199
200 globaltags = {}
200 globaltags = {}
201
201
202 def readtags(lines, fn):
202 def readtags(lines, fn):
203 filetags = {}
203 filetags = {}
204 count = 0
204 count = 0
205
205
206 def warn(msg):
206 def warn(msg):
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208
208
209 for l in lines:
209 for l in lines:
210 count += 1
210 count += 1
211 if not l:
211 if not l:
212 continue
212 continue
213 s = l.split(" ", 1)
213 s = l.split(" ", 1)
214 if len(s) != 2:
214 if len(s) != 2:
215 warn(_("cannot parse entry"))
215 warn(_("cannot parse entry"))
216 continue
216 continue
217 node, key = s
217 node, key = s
218 key = util.tolocal(key.strip()) # stored in UTF-8
218 key = util.tolocal(key.strip()) # stored in UTF-8
219 try:
219 try:
220 bin_n = bin(node)
220 bin_n = bin(node)
221 except TypeError:
221 except TypeError:
222 warn(_("node '%s' is not well formed") % node)
222 warn(_("node '%s' is not well formed") % node)
223 continue
223 continue
224 if bin_n not in self.changelog.nodemap:
224 if bin_n not in self.changelog.nodemap:
225 warn(_("tag '%s' refers to unknown node") % key)
225 warn(_("tag '%s' refers to unknown node") % key)
226 continue
226 continue
227
227
228 h = []
228 h = []
229 if key in filetags:
229 if key in filetags:
230 n, h = filetags[key]
230 n, h = filetags[key]
231 h.append(n)
231 h.append(n)
232 filetags[key] = (bin_n, h)
232 filetags[key] = (bin_n, h)
233
233
234 for k, nh in filetags.items():
234 for k, nh in filetags.items():
235 if k not in globaltags:
235 if k not in globaltags:
236 globaltags[k] = nh
236 globaltags[k] = nh
237 continue
237 continue
238 # we prefer the global tag if:
238 # we prefer the global tag if:
239 # it supercedes us OR
239 # it supercedes us OR
240 # mutual supercedes and it has a higher rank
240 # mutual supercedes and it has a higher rank
241 # otherwise we win because we're tip-most
241 # otherwise we win because we're tip-most
242 an, ah = nh
242 an, ah = nh
243 bn, bh = globaltags[k]
243 bn, bh = globaltags[k]
244 if (bn != an and an in bh and
244 if (bn != an and an in bh and
245 (bn not in ah or len(bh) > len(ah))):
245 (bn not in ah or len(bh) > len(ah))):
246 an = bn
246 an = bn
247 ah.extend([n for n in bh if n not in ah])
247 ah.extend([n for n in bh if n not in ah])
248 globaltags[k] = an, ah
248 globaltags[k] = an, ah
249
249
250 # read the tags file from each head, ending with the tip
250 # read the tags file from each head, ending with the tip
251 f = None
251 f = None
252 for rev, node, fnode in self._hgtagsnodes():
252 for rev, node, fnode in self._hgtagsnodes():
253 f = (f and f.filectx(fnode) or
253 f = (f and f.filectx(fnode) or
254 self.filectx('.hgtags', fileid=fnode))
254 self.filectx('.hgtags', fileid=fnode))
255 readtags(f.data().splitlines(), f)
255 readtags(f.data().splitlines(), f)
256
256
257 try:
257 try:
258 data = util.fromlocal(self.opener("localtags").read())
258 data = util.fromlocal(self.opener("localtags").read())
259 # localtags are stored in the local character set
259 # localtags are stored in the local character set
260 # while the internal tag table is stored in UTF-8
260 # while the internal tag table is stored in UTF-8
261 readtags(data.splitlines(), "localtags")
261 readtags(data.splitlines(), "localtags")
262 except IOError:
262 except IOError:
263 pass
263 pass
264
264
265 self.tagscache = {}
265 self.tagscache = {}
266 for k,nh in globaltags.items():
266 for k,nh in globaltags.items():
267 n = nh[0]
267 n = nh[0]
268 if n != nullid:
268 if n != nullid:
269 self.tagscache[k] = n
269 self.tagscache[k] = n
270 self.tagscache['tip'] = self.changelog.tip()
270 self.tagscache['tip'] = self.changelog.tip()
271
271
272 return self.tagscache
272 return self.tagscache
273
273
274 def _hgtagsnodes(self):
274 def _hgtagsnodes(self):
275 heads = self.heads()
275 heads = self.heads()
276 heads.reverse()
276 heads.reverse()
277 last = {}
277 last = {}
278 ret = []
278 ret = []
279 for node in heads:
279 for node in heads:
280 c = self.changectx(node)
280 c = self.changectx(node)
281 rev = c.rev()
281 rev = c.rev()
282 try:
282 try:
283 fnode = c.filenode('.hgtags')
283 fnode = c.filenode('.hgtags')
284 except revlog.LookupError:
284 except revlog.LookupError:
285 continue
285 continue
286 ret.append((rev, node, fnode))
286 ret.append((rev, node, fnode))
287 if fnode in last:
287 if fnode in last:
288 ret[last[fnode]] = None
288 ret[last[fnode]] = None
289 last[fnode] = len(ret) - 1
289 last[fnode] = len(ret) - 1
290 return [item for item in ret if item]
290 return [item for item in ret if item]
291
291
292 def tagslist(self):
292 def tagslist(self):
293 '''return a list of tags ordered by revision'''
293 '''return a list of tags ordered by revision'''
294 l = []
294 l = []
295 for t, n in self.tags().items():
295 for t, n in self.tags().items():
296 try:
296 try:
297 r = self.changelog.rev(n)
297 r = self.changelog.rev(n)
298 except:
298 except:
299 r = -2 # sort to the beginning of the list if unknown
299 r = -2 # sort to the beginning of the list if unknown
300 l.append((r, t, n))
300 l.append((r, t, n))
301 l.sort()
301 l.sort()
302 return [(t, n) for r, t, n in l]
302 return [(t, n) for r, t, n in l]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().items():
308 for t, n in self.tags().items():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def _branchtags(self):
312 def _branchtags(self):
313 partial, last, lrev = self._readbranchcache()
313 partial, last, lrev = self._readbranchcache()
314
314
315 tiprev = self.changelog.count() - 1
315 tiprev = self.changelog.count() - 1
316 if lrev != tiprev:
316 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
319
320 return partial
320 return partial
321
321
322 def branchtags(self):
322 def branchtags(self):
323 if self.branchcache is not None:
323 if self.branchcache is not None:
324 return self.branchcache
324 return self.branchcache
325
325
326 self.branchcache = {} # avoid recursion in changectx
326 self.branchcache = {} # avoid recursion in changectx
327 partial = self._branchtags()
327 partial = self._branchtags()
328
328
329 # the branch cache is stored on disk as UTF-8, but in the local
329 # the branch cache is stored on disk as UTF-8, but in the local
330 # charset internally
330 # charset internally
331 for k, v in partial.items():
331 for k, v in partial.items():
332 self.branchcache[util.tolocal(k)] = v
332 self.branchcache[util.tolocal(k)] = v
333 return self.branchcache
333 return self.branchcache
334
334
335 def _readbranchcache(self):
335 def _readbranchcache(self):
336 partial = {}
336 partial = {}
337 try:
337 try:
338 f = self.opener("branch.cache")
338 f = self.opener("branch.cache")
339 lines = f.read().split('\n')
339 lines = f.read().split('\n')
340 f.close()
340 f.close()
341 except (IOError, OSError):
341 except (IOError, OSError):
342 return {}, nullid, nullrev
342 return {}, nullid, nullrev
343
343
344 try:
344 try:
345 last, lrev = lines.pop(0).split(" ", 1)
345 last, lrev = lines.pop(0).split(" ", 1)
346 last, lrev = bin(last), int(lrev)
346 last, lrev = bin(last), int(lrev)
347 if not (lrev < self.changelog.count() and
347 if not (lrev < self.changelog.count() and
348 self.changelog.node(lrev) == last): # sanity check
348 self.changelog.node(lrev) == last): # sanity check
349 # invalidate the cache
349 # invalidate the cache
350 raise ValueError('Invalid branch cache: unknown tip')
350 raise ValueError('Invalid branch cache: unknown tip')
351 for l in lines:
351 for l in lines:
352 if not l: continue
352 if not l: continue
353 node, label = l.split(" ", 1)
353 node, label = l.split(" ", 1)
354 partial[label.strip()] = bin(node)
354 partial[label.strip()] = bin(node)
355 except (KeyboardInterrupt, util.SignalInterrupt):
355 except (KeyboardInterrupt, util.SignalInterrupt):
356 raise
356 raise
357 except Exception, inst:
357 except Exception, inst:
358 if self.ui.debugflag:
358 if self.ui.debugflag:
359 self.ui.warn(str(inst), '\n')
359 self.ui.warn(str(inst), '\n')
360 partial, last, lrev = {}, nullid, nullrev
360 partial, last, lrev = {}, nullid, nullrev
361 return partial, last, lrev
361 return partial, last, lrev
362
362
363 def _writebranchcache(self, branches, tip, tiprev):
363 def _writebranchcache(self, branches, tip, tiprev):
364 try:
364 try:
365 f = self.opener("branch.cache", "w", atomictemp=True)
365 f = self.opener("branch.cache", "w", atomictemp=True)
366 f.write("%s %s\n" % (hex(tip), tiprev))
366 f.write("%s %s\n" % (hex(tip), tiprev))
367 for label, node in branches.iteritems():
367 for label, node in branches.iteritems():
368 f.write("%s %s\n" % (hex(node), label))
368 f.write("%s %s\n" % (hex(node), label))
369 f.rename()
369 f.rename()
370 except (IOError, OSError):
370 except (IOError, OSError):
371 pass
371 pass
372
372
373 def _updatebranchcache(self, partial, start, end):
373 def _updatebranchcache(self, partial, start, end):
374 for r in xrange(start, end):
374 for r in xrange(start, end):
375 c = self.changectx(r)
375 c = self.changectx(r)
376 b = c.branch()
376 b = c.branch()
377 partial[b] = c.node()
377 partial[b] = c.node()
378
378
379 def lookup(self, key):
379 def lookup(self, key):
380 if key == '.':
380 if key == '.':
381 key, second = self.dirstate.parents()
381 key, second = self.dirstate.parents()
382 if key == nullid:
382 if key == nullid:
383 raise repo.RepoError(_("no revision checked out"))
383 raise repo.RepoError(_("no revision checked out"))
384 if second != nullid:
384 if second != nullid:
385 self.ui.warn(_("warning: working directory has two parents, "
385 self.ui.warn(_("warning: working directory has two parents, "
386 "tag '.' uses the first\n"))
386 "tag '.' uses the first\n"))
387 elif key == 'null':
387 elif key == 'null':
388 return nullid
388 return nullid
389 n = self.changelog._match(key)
389 n = self.changelog._match(key)
390 if n:
390 if n:
391 return n
391 return n
392 if key in self.tags():
392 if key in self.tags():
393 return self.tags()[key]
393 return self.tags()[key]
394 if key in self.branchtags():
394 if key in self.branchtags():
395 return self.branchtags()[key]
395 return self.branchtags()[key]
396 n = self.changelog._partialmatch(key)
396 n = self.changelog._partialmatch(key)
397 if n:
397 if n:
398 return n
398 return n
399 raise repo.RepoError(_("unknown revision '%s'") % key)
399 raise repo.RepoError(_("unknown revision '%s'") % key)
400
400
401 def dev(self):
401 def dev(self):
402 return os.lstat(self.path).st_dev
402 return os.lstat(self.path).st_dev
403
403
404 def local(self):
404 def local(self):
405 return True
405 return True
406
406
407 def join(self, f):
407 def join(self, f):
408 return os.path.join(self.path, f)
408 return os.path.join(self.path, f)
409
409
410 def sjoin(self, f):
410 def sjoin(self, f):
411 f = self.encodefn(f)
411 f = self.encodefn(f)
412 return os.path.join(self.spath, f)
412 return os.path.join(self.spath, f)
413
413
414 def wjoin(self, f):
414 def wjoin(self, f):
415 return os.path.join(self.root, f)
415 return os.path.join(self.root, f)
416
416
417 def file(self, f):
417 def file(self, f):
418 if f[0] == '/':
418 if f[0] == '/':
419 f = f[1:]
419 f = f[1:]
420 return filelog.filelog(self.sopener, f)
420 return filelog.filelog(self.sopener, f)
421
421
422 def changectx(self, changeid=None):
422 def changectx(self, changeid=None):
423 return context.changectx(self, changeid)
423 return context.changectx(self, changeid)
424
424
425 def workingctx(self):
425 def workingctx(self):
426 return context.workingctx(self)
426 return context.workingctx(self)
427
427
428 def parents(self, changeid=None):
428 def parents(self, changeid=None):
429 '''
429 '''
430 get list of changectxs for parents of changeid or working directory
430 get list of changectxs for parents of changeid or working directory
431 '''
431 '''
432 if changeid is None:
432 if changeid is None:
433 pl = self.dirstate.parents()
433 pl = self.dirstate.parents()
434 else:
434 else:
435 n = self.changelog.lookup(changeid)
435 n = self.changelog.lookup(changeid)
436 pl = self.changelog.parents(n)
436 pl = self.changelog.parents(n)
437 if pl[1] == nullid:
437 if pl[1] == nullid:
438 return [self.changectx(pl[0])]
438 return [self.changectx(pl[0])]
439 return [self.changectx(pl[0]), self.changectx(pl[1])]
439 return [self.changectx(pl[0]), self.changectx(pl[1])]
440
440
441 def filectx(self, path, changeid=None, fileid=None):
441 def filectx(self, path, changeid=None, fileid=None):
442 """changeid can be a changeset revision, node, or tag.
442 """changeid can be a changeset revision, node, or tag.
443 fileid can be a file revision or node."""
443 fileid can be a file revision or node."""
444 return context.filectx(self, path, changeid, fileid)
444 return context.filectx(self, path, changeid, fileid)
445
445
446 def getcwd(self):
446 def getcwd(self):
447 return self.dirstate.getcwd()
447 return self.dirstate.getcwd()
448
448
449 def pathto(self, f, cwd=None):
449 def pathto(self, f, cwd=None):
450 return self.dirstate.pathto(f, cwd)
450 return self.dirstate.pathto(f, cwd)
451
451
452 def wfile(self, f, mode='r'):
452 def wfile(self, f, mode='r'):
453 return self.wopener(f, mode)
453 return self.wopener(f, mode)
454
454
455 def _link(self, f):
455 def _link(self, f):
456 return os.path.islink(self.wjoin(f))
456 return os.path.islink(self.wjoin(f))
457
457
458 def _filter(self, filter, filename, data):
458 def _filter(self, filter, filename, data):
459 if filter not in self.filterpats:
459 if filter not in self.filterpats:
460 l = []
460 l = []
461 for pat, cmd in self.ui.configitems(filter):
461 for pat, cmd in self.ui.configitems(filter):
462 mf = util.matcher(self.root, "", [pat], [], [])[1]
462 mf = util.matcher(self.root, "", [pat], [], [])[1]
463 l.append((mf, cmd))
463 l.append((mf, cmd))
464 self.filterpats[filter] = l
464 self.filterpats[filter] = l
465
465
466 for mf, cmd in self.filterpats[filter]:
466 for mf, cmd in self.filterpats[filter]:
467 if mf(filename):
467 if mf(filename):
468 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
468 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
469 data = util.filter(data, cmd)
469 data = util.filter(data, cmd)
470 break
470 break
471
471
472 return data
472 return data
473
473
474 def wread(self, filename):
474 def wread(self, filename):
475 if self._link(filename):
475 if self._link(filename):
476 data = os.readlink(self.wjoin(filename))
476 data = os.readlink(self.wjoin(filename))
477 else:
477 else:
478 data = self.wopener(filename, 'r').read()
478 data = self.wopener(filename, 'r').read()
479 return self._filter("encode", filename, data)
479 return self._filter("encode", filename, data)
480
480
481 def wwrite(self, filename, data, flags):
481 def wwrite(self, filename, data, flags):
482 data = self._filter("decode", filename, data)
482 data = self._filter("decode", filename, data)
483 if "l" in flags:
483 if "l" in flags:
484 self.wopener.symlink(data, filename)
484 self.wopener.symlink(data, filename)
485 else:
485 else:
486 try:
486 try:
487 if self._link(filename):
487 if self._link(filename):
488 os.unlink(self.wjoin(filename))
488 os.unlink(self.wjoin(filename))
489 except OSError:
489 except OSError:
490 pass
490 pass
491 self.wopener(filename, 'w').write(data)
491 self.wopener(filename, 'w').write(data)
492 util.set_exec(self.wjoin(filename), "x" in flags)
492 util.set_exec(self.wjoin(filename), "x" in flags)
493
493
494 def wwritedata(self, filename, data):
494 def wwritedata(self, filename, data):
495 return self._filter("decode", filename, data)
495 return self._filter("decode", filename, data)
496
496
497 def transaction(self):
497 def transaction(self):
498 tr = self.transhandle
498 tr = self.transhandle
499 if tr != None and tr.running():
499 if tr != None and tr.running():
500 return tr.nest()
500 return tr.nest()
501
501
502 # save dirstate for rollback
502 # save dirstate for rollback
503 try:
503 try:
504 ds = self.opener("dirstate").read()
504 ds = self.opener("dirstate").read()
505 except IOError:
505 except IOError:
506 ds = ""
506 ds = ""
507 self.opener("journal.dirstate", "w").write(ds)
507 self.opener("journal.dirstate", "w").write(ds)
508
508
509 renames = [(self.sjoin("journal"), self.sjoin("undo")),
509 renames = [(self.sjoin("journal"), self.sjoin("undo")),
510 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
510 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
511 tr = transaction.transaction(self.ui.warn, self.sopener,
511 tr = transaction.transaction(self.ui.warn, self.sopener,
512 self.sjoin("journal"),
512 self.sjoin("journal"),
513 aftertrans(renames))
513 aftertrans(renames))
514 self.transhandle = tr
514 self.transhandle = tr
515 return tr
515 return tr
516
516
517 def recover(self):
517 def recover(self):
518 l = self.lock()
518 l = self.lock()
519 if os.path.exists(self.sjoin("journal")):
519 if os.path.exists(self.sjoin("journal")):
520 self.ui.status(_("rolling back interrupted transaction\n"))
520 self.ui.status(_("rolling back interrupted transaction\n"))
521 transaction.rollback(self.sopener, self.sjoin("journal"))
521 transaction.rollback(self.sopener, self.sjoin("journal"))
522 self.invalidate()
522 self.invalidate()
523 return True
523 return True
524 else:
524 else:
525 self.ui.warn(_("no interrupted transaction available\n"))
525 self.ui.warn(_("no interrupted transaction available\n"))
526 return False
526 return False
527
527
528 def rollback(self, wlock=None, lock=None):
528 def rollback(self, wlock=None, lock=None):
529 if not wlock:
529 if not wlock:
530 wlock = self.wlock()
530 wlock = self.wlock()
531 if not lock:
531 if not lock:
532 lock = self.lock()
532 lock = self.lock()
533 if os.path.exists(self.sjoin("undo")):
533 if os.path.exists(self.sjoin("undo")):
534 self.ui.status(_("rolling back last transaction\n"))
534 self.ui.status(_("rolling back last transaction\n"))
535 transaction.rollback(self.sopener, self.sjoin("undo"))
535 transaction.rollback(self.sopener, self.sjoin("undo"))
536 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
536 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
537 self.invalidate()
537 self.invalidate()
538 self.dirstate.invalidate()
538 self.dirstate.invalidate()
539 else:
539 else:
540 self.ui.warn(_("no rollback information available\n"))
540 self.ui.warn(_("no rollback information available\n"))
541
541
542 def invalidate(self):
542 def invalidate(self):
543 for a in "changelog manifest".split():
543 for a in "changelog manifest".split():
544 if hasattr(self, a):
544 if hasattr(self, a):
545 self.__delattr__(a)
545 self.__delattr__(a)
546 self.tagscache = None
546 self.tagscache = None
547 self.nodetagscache = None
547 self.nodetagscache = None
548
548
549 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
549 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
550 desc=None):
551 try:
550 try:
552 l = lock.lock(lockname, 0, releasefn, desc=desc)
551 l = lock.lock(lockname, 0, releasefn, desc=desc)
553 except lock.LockHeld, inst:
552 except lock.LockHeld, inst:
554 if not wait:
553 if not wait:
555 raise
554 raise
556 self.ui.warn(_("waiting for lock on %s held by %r\n") %
555 self.ui.warn(_("waiting for lock on %s held by %r\n") %
557 (desc, inst.locker))
556 (desc, inst.locker))
558 # default to 600 seconds timeout
557 # default to 600 seconds timeout
559 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
558 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
560 releasefn, desc=desc)
559 releasefn, desc=desc)
561 if acquirefn:
560 if acquirefn:
562 acquirefn()
561 acquirefn()
563 return l
562 return l
564
563
565 def lock(self, wait=1):
564 def lock(self, wait=1):
566 return self.do_lock(self.sjoin("lock"), wait,
565 return self._lock(self.sjoin("lock"), wait, None, self.invalidate,
567 acquirefn=self.invalidate,
566 _('repository %s') % self.origroot)
568 desc=_('repository %s') % self.origroot)
569
567
570 def wlock(self, wait=1):
568 def wlock(self, wait=1):
571 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
569 return self._lock(self.join("wlock"), wait, self.dirstate.write,
572 self.dirstate.invalidate,
570 self.dirstate.invalidate,
573 desc=_('working directory of %s') % self.origroot)
571 _('working directory of %s') % self.origroot)
574
572
575 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
573 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
576 """
574 """
577 commit an individual file as part of a larger transaction
575 commit an individual file as part of a larger transaction
578 """
576 """
579
577
580 t = self.wread(fn)
578 t = self.wread(fn)
581 fl = self.file(fn)
579 fl = self.file(fn)
582 fp1 = manifest1.get(fn, nullid)
580 fp1 = manifest1.get(fn, nullid)
583 fp2 = manifest2.get(fn, nullid)
581 fp2 = manifest2.get(fn, nullid)
584
582
585 meta = {}
583 meta = {}
586 cp = self.dirstate.copied(fn)
584 cp = self.dirstate.copied(fn)
587 if cp:
585 if cp:
588 # Mark the new revision of this file as a copy of another
586 # Mark the new revision of this file as a copy of another
589 # file. This copy data will effectively act as a parent
587 # file. This copy data will effectively act as a parent
590 # of this new revision. If this is a merge, the first
588 # of this new revision. If this is a merge, the first
591 # parent will be the nullid (meaning "look up the copy data")
589 # parent will be the nullid (meaning "look up the copy data")
592 # and the second one will be the other parent. For example:
590 # and the second one will be the other parent. For example:
593 #
591 #
594 # 0 --- 1 --- 3 rev1 changes file foo
592 # 0 --- 1 --- 3 rev1 changes file foo
595 # \ / rev2 renames foo to bar and changes it
593 # \ / rev2 renames foo to bar and changes it
596 # \- 2 -/ rev3 should have bar with all changes and
594 # \- 2 -/ rev3 should have bar with all changes and
597 # should record that bar descends from
595 # should record that bar descends from
598 # bar in rev2 and foo in rev1
596 # bar in rev2 and foo in rev1
599 #
597 #
600 # this allows this merge to succeed:
598 # this allows this merge to succeed:
601 #
599 #
602 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
600 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
603 # \ / merging rev3 and rev4 should use bar@rev2
601 # \ / merging rev3 and rev4 should use bar@rev2
604 # \- 2 --- 4 as the merge base
602 # \- 2 --- 4 as the merge base
605 #
603 #
606 meta["copy"] = cp
604 meta["copy"] = cp
607 if not manifest2: # not a branch merge
605 if not manifest2: # not a branch merge
608 meta["copyrev"] = hex(manifest1.get(cp, nullid))
606 meta["copyrev"] = hex(manifest1.get(cp, nullid))
609 fp2 = nullid
607 fp2 = nullid
610 elif fp2 != nullid: # copied on remote side
608 elif fp2 != nullid: # copied on remote side
611 meta["copyrev"] = hex(manifest1.get(cp, nullid))
609 meta["copyrev"] = hex(manifest1.get(cp, nullid))
612 elif fp1 != nullid: # copied on local side, reversed
610 elif fp1 != nullid: # copied on local side, reversed
613 meta["copyrev"] = hex(manifest2.get(cp))
611 meta["copyrev"] = hex(manifest2.get(cp))
614 fp2 = fp1
612 fp2 = fp1
615 else: # directory rename
613 else: # directory rename
616 meta["copyrev"] = hex(manifest1.get(cp, nullid))
614 meta["copyrev"] = hex(manifest1.get(cp, nullid))
617 self.ui.debug(_(" %s: copy %s:%s\n") %
615 self.ui.debug(_(" %s: copy %s:%s\n") %
618 (fn, cp, meta["copyrev"]))
616 (fn, cp, meta["copyrev"]))
619 fp1 = nullid
617 fp1 = nullid
620 elif fp2 != nullid:
618 elif fp2 != nullid:
621 # is one parent an ancestor of the other?
619 # is one parent an ancestor of the other?
622 fpa = fl.ancestor(fp1, fp2)
620 fpa = fl.ancestor(fp1, fp2)
623 if fpa == fp1:
621 if fpa == fp1:
624 fp1, fp2 = fp2, nullid
622 fp1, fp2 = fp2, nullid
625 elif fpa == fp2:
623 elif fpa == fp2:
626 fp2 = nullid
624 fp2 = nullid
627
625
628 # is the file unmodified from the parent? report existing entry
626 # is the file unmodified from the parent? report existing entry
629 if fp2 == nullid and not fl.cmp(fp1, t):
627 if fp2 == nullid and not fl.cmp(fp1, t):
630 return fp1
628 return fp1
631
629
632 changelist.append(fn)
630 changelist.append(fn)
633 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
631 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
634
632
635 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
633 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
636 if p1 is None:
634 if p1 is None:
637 p1, p2 = self.dirstate.parents()
635 p1, p2 = self.dirstate.parents()
638 return self.commit(files=files, text=text, user=user, date=date,
636 return self.commit(files=files, text=text, user=user, date=date,
639 p1=p1, p2=p2, wlock=wlock, extra=extra)
637 p1=p1, p2=p2, wlock=wlock, extra=extra)
640
638
641 def commit(self, files=None, text="", user=None, date=None,
639 def commit(self, files=None, text="", user=None, date=None,
642 match=util.always, force=False, lock=None, wlock=None,
640 match=util.always, force=False, lock=None, wlock=None,
643 force_editor=False, p1=None, p2=None, extra={}):
641 force_editor=False, p1=None, p2=None, extra={}):
644
642
645 commit = []
643 commit = []
646 remove = []
644 remove = []
647 changed = []
645 changed = []
648 use_dirstate = (p1 is None) # not rawcommit
646 use_dirstate = (p1 is None) # not rawcommit
649 extra = extra.copy()
647 extra = extra.copy()
650
648
651 if use_dirstate:
649 if use_dirstate:
652 if files:
650 if files:
653 for f in files:
651 for f in files:
654 s = self.dirstate[f]
652 s = self.dirstate[f]
655 if s in 'nma':
653 if s in 'nma':
656 commit.append(f)
654 commit.append(f)
657 elif s == 'r':
655 elif s == 'r':
658 remove.append(f)
656 remove.append(f)
659 else:
657 else:
660 self.ui.warn(_("%s not tracked!\n") % f)
658 self.ui.warn(_("%s not tracked!\n") % f)
661 else:
659 else:
662 changes = self.status(match=match)[:5]
660 changes = self.status(match=match)[:5]
663 modified, added, removed, deleted, unknown = changes
661 modified, added, removed, deleted, unknown = changes
664 commit = modified + added
662 commit = modified + added
665 remove = removed
663 remove = removed
666 else:
664 else:
667 commit = files
665 commit = files
668
666
669 if use_dirstate:
667 if use_dirstate:
670 p1, p2 = self.dirstate.parents()
668 p1, p2 = self.dirstate.parents()
671 update_dirstate = True
669 update_dirstate = True
672 else:
670 else:
673 p1, p2 = p1, p2 or nullid
671 p1, p2 = p1, p2 or nullid
674 update_dirstate = (self.dirstate.parents()[0] == p1)
672 update_dirstate = (self.dirstate.parents()[0] == p1)
675
673
676 c1 = self.changelog.read(p1)
674 c1 = self.changelog.read(p1)
677 c2 = self.changelog.read(p2)
675 c2 = self.changelog.read(p2)
678 m1 = self.manifest.read(c1[0]).copy()
676 m1 = self.manifest.read(c1[0]).copy()
679 m2 = self.manifest.read(c2[0])
677 m2 = self.manifest.read(c2[0])
680
678
681 if use_dirstate:
679 if use_dirstate:
682 branchname = self.workingctx().branch()
680 branchname = self.workingctx().branch()
683 try:
681 try:
684 branchname = branchname.decode('UTF-8').encode('UTF-8')
682 branchname = branchname.decode('UTF-8').encode('UTF-8')
685 except UnicodeDecodeError:
683 except UnicodeDecodeError:
686 raise util.Abort(_('branch name not in UTF-8!'))
684 raise util.Abort(_('branch name not in UTF-8!'))
687 else:
685 else:
688 branchname = ""
686 branchname = ""
689
687
690 if use_dirstate:
688 if use_dirstate:
691 oldname = c1[5].get("branch") # stored in UTF-8
689 oldname = c1[5].get("branch") # stored in UTF-8
692 if (not commit and not remove and not force and p2 == nullid
690 if (not commit and not remove and not force and p2 == nullid
693 and branchname == oldname):
691 and branchname == oldname):
694 self.ui.status(_("nothing changed\n"))
692 self.ui.status(_("nothing changed\n"))
695 return None
693 return None
696
694
697 xp1 = hex(p1)
695 xp1 = hex(p1)
698 if p2 == nullid: xp2 = ''
696 if p2 == nullid: xp2 = ''
699 else: xp2 = hex(p2)
697 else: xp2 = hex(p2)
700
698
701 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
699 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
702
700
703 if not wlock:
701 if not wlock:
704 wlock = self.wlock()
702 wlock = self.wlock()
705 if not lock:
703 if not lock:
706 lock = self.lock()
704 lock = self.lock()
707 tr = self.transaction()
705 tr = self.transaction()
708
706
709 # check in files
707 # check in files
710 new = {}
708 new = {}
711 linkrev = self.changelog.count()
709 linkrev = self.changelog.count()
712 commit.sort()
710 commit.sort()
713 is_exec = util.execfunc(self.root, m1.execf)
711 is_exec = util.execfunc(self.root, m1.execf)
714 is_link = util.linkfunc(self.root, m1.linkf)
712 is_link = util.linkfunc(self.root, m1.linkf)
715 for f in commit:
713 for f in commit:
716 self.ui.note(f + "\n")
714 self.ui.note(f + "\n")
717 try:
715 try:
718 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
716 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
719 new_exec = is_exec(f)
717 new_exec = is_exec(f)
720 new_link = is_link(f)
718 new_link = is_link(f)
721 if not changed or changed[-1] != f:
719 if not changed or changed[-1] != f:
722 # mention the file in the changelog if some flag changed,
720 # mention the file in the changelog if some flag changed,
723 # even if there was no content change.
721 # even if there was no content change.
724 old_exec = m1.execf(f)
722 old_exec = m1.execf(f)
725 old_link = m1.linkf(f)
723 old_link = m1.linkf(f)
726 if old_exec != new_exec or old_link != new_link:
724 if old_exec != new_exec or old_link != new_link:
727 changed.append(f)
725 changed.append(f)
728 m1.set(f, new_exec, new_link)
726 m1.set(f, new_exec, new_link)
729 except (OSError, IOError):
727 except (OSError, IOError):
730 if use_dirstate:
728 if use_dirstate:
731 self.ui.warn(_("trouble committing %s!\n") % f)
729 self.ui.warn(_("trouble committing %s!\n") % f)
732 raise
730 raise
733 else:
731 else:
734 remove.append(f)
732 remove.append(f)
735
733
736 # update manifest
734 # update manifest
737 m1.update(new)
735 m1.update(new)
738 remove.sort()
736 remove.sort()
739 removed = []
737 removed = []
740
738
741 for f in remove:
739 for f in remove:
742 if f in m1:
740 if f in m1:
743 del m1[f]
741 del m1[f]
744 removed.append(f)
742 removed.append(f)
745 elif f in m2:
743 elif f in m2:
746 removed.append(f)
744 removed.append(f)
747 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
745 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
748
746
749 # add changeset
747 # add changeset
750 new = new.keys()
748 new = new.keys()
751 new.sort()
749 new.sort()
752
750
753 user = user or self.ui.username()
751 user = user or self.ui.username()
754 if not text or force_editor:
752 if not text or force_editor:
755 edittext = []
753 edittext = []
756 if text:
754 if text:
757 edittext.append(text)
755 edittext.append(text)
758 edittext.append("")
756 edittext.append("")
759 edittext.append("HG: user: %s" % user)
757 edittext.append("HG: user: %s" % user)
760 if p2 != nullid:
758 if p2 != nullid:
761 edittext.append("HG: branch merge")
759 edittext.append("HG: branch merge")
762 if branchname:
760 if branchname:
763 edittext.append("HG: branch %s" % util.tolocal(branchname))
761 edittext.append("HG: branch %s" % util.tolocal(branchname))
764 edittext.extend(["HG: changed %s" % f for f in changed])
762 edittext.extend(["HG: changed %s" % f for f in changed])
765 edittext.extend(["HG: removed %s" % f for f in removed])
763 edittext.extend(["HG: removed %s" % f for f in removed])
766 if not changed and not remove:
764 if not changed and not remove:
767 edittext.append("HG: no files changed")
765 edittext.append("HG: no files changed")
768 edittext.append("")
766 edittext.append("")
769 # run editor in the repository root
767 # run editor in the repository root
770 olddir = os.getcwd()
768 olddir = os.getcwd()
771 os.chdir(self.root)
769 os.chdir(self.root)
772 text = self.ui.edit("\n".join(edittext), user)
770 text = self.ui.edit("\n".join(edittext), user)
773 os.chdir(olddir)
771 os.chdir(olddir)
774
772
775 lines = [line.rstrip() for line in text.rstrip().splitlines()]
773 lines = [line.rstrip() for line in text.rstrip().splitlines()]
776 while lines and not lines[0]:
774 while lines and not lines[0]:
777 del lines[0]
775 del lines[0]
778 if not lines:
776 if not lines:
779 return None
777 return None
780 text = '\n'.join(lines)
778 text = '\n'.join(lines)
781 if branchname:
779 if branchname:
782 extra["branch"] = branchname
780 extra["branch"] = branchname
783 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
781 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
784 user, date, extra)
782 user, date, extra)
785 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
783 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
786 parent2=xp2)
784 parent2=xp2)
787 tr.close()
785 tr.close()
788
786
789 if self.branchcache and "branch" in extra:
787 if self.branchcache and "branch" in extra:
790 self.branchcache[util.tolocal(extra["branch"])] = n
788 self.branchcache[util.tolocal(extra["branch"])] = n
791
789
792 if use_dirstate or update_dirstate:
790 if use_dirstate or update_dirstate:
793 self.dirstate.setparents(n)
791 self.dirstate.setparents(n)
794 if use_dirstate:
792 if use_dirstate:
795 for f in new:
793 for f in new:
796 self.dirstate.normal(f)
794 self.dirstate.normal(f)
797 for f in removed:
795 for f in removed:
798 self.dirstate.forget(f)
796 self.dirstate.forget(f)
799
797
800 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
798 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
801 return n
799 return n
802
800
803 def walk(self, node=None, files=[], match=util.always, badmatch=None):
801 def walk(self, node=None, files=[], match=util.always, badmatch=None):
804 '''
802 '''
805 walk recursively through the directory tree or a given
803 walk recursively through the directory tree or a given
806 changeset, finding all files matched by the match
804 changeset, finding all files matched by the match
807 function
805 function
808
806
809 results are yielded in a tuple (src, filename), where src
807 results are yielded in a tuple (src, filename), where src
810 is one of:
808 is one of:
811 'f' the file was found in the directory tree
809 'f' the file was found in the directory tree
812 'm' the file was only in the dirstate and not in the tree
810 'm' the file was only in the dirstate and not in the tree
813 'b' file was not found and matched badmatch
811 'b' file was not found and matched badmatch
814 '''
812 '''
815
813
816 if node:
814 if node:
817 fdict = dict.fromkeys(files)
815 fdict = dict.fromkeys(files)
818 # for dirstate.walk, files=['.'] means "walk the whole tree".
816 # for dirstate.walk, files=['.'] means "walk the whole tree".
819 # follow that here, too
817 # follow that here, too
820 fdict.pop('.', None)
818 fdict.pop('.', None)
821 mdict = self.manifest.read(self.changelog.read(node)[0])
819 mdict = self.manifest.read(self.changelog.read(node)[0])
822 mfiles = mdict.keys()
820 mfiles = mdict.keys()
823 mfiles.sort()
821 mfiles.sort()
824 for fn in mfiles:
822 for fn in mfiles:
825 for ffn in fdict:
823 for ffn in fdict:
826 # match if the file is the exact name or a directory
824 # match if the file is the exact name or a directory
827 if ffn == fn or fn.startswith("%s/" % ffn):
825 if ffn == fn or fn.startswith("%s/" % ffn):
828 del fdict[ffn]
826 del fdict[ffn]
829 break
827 break
830 if match(fn):
828 if match(fn):
831 yield 'm', fn
829 yield 'm', fn
832 ffiles = fdict.keys()
830 ffiles = fdict.keys()
833 ffiles.sort()
831 ffiles.sort()
834 for fn in ffiles:
832 for fn in ffiles:
835 if badmatch and badmatch(fn):
833 if badmatch and badmatch(fn):
836 if match(fn):
834 if match(fn):
837 yield 'b', fn
835 yield 'b', fn
838 else:
836 else:
839 self.ui.warn(_('%s: No such file in rev %s\n')
837 self.ui.warn(_('%s: No such file in rev %s\n')
840 % (self.pathto(fn), short(node)))
838 % (self.pathto(fn), short(node)))
841 else:
839 else:
842 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
840 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
843 yield src, fn
841 yield src, fn
844
842
845 def status(self, node1=None, node2=None, files=[], match=util.always,
843 def status(self, node1=None, node2=None, files=[], match=util.always,
846 wlock=None, list_ignored=False, list_clean=False):
844 wlock=None, list_ignored=False, list_clean=False):
847 """return status of files between two nodes or node and working directory
845 """return status of files between two nodes or node and working directory
848
846
849 If node1 is None, use the first dirstate parent instead.
847 If node1 is None, use the first dirstate parent instead.
850 If node2 is None, compare node1 with working directory.
848 If node2 is None, compare node1 with working directory.
851 """
849 """
852
850
853 def fcmp(fn, getnode):
851 def fcmp(fn, getnode):
854 t1 = self.wread(fn)
852 t1 = self.wread(fn)
855 return self.file(fn).cmp(getnode(fn), t1)
853 return self.file(fn).cmp(getnode(fn), t1)
856
854
857 def mfmatches(node):
855 def mfmatches(node):
858 change = self.changelog.read(node)
856 change = self.changelog.read(node)
859 mf = self.manifest.read(change[0]).copy()
857 mf = self.manifest.read(change[0]).copy()
860 for fn in mf.keys():
858 for fn in mf.keys():
861 if not match(fn):
859 if not match(fn):
862 del mf[fn]
860 del mf[fn]
863 return mf
861 return mf
864
862
865 modified, added, removed, deleted, unknown = [], [], [], [], []
863 modified, added, removed, deleted, unknown = [], [], [], [], []
866 ignored, clean = [], []
864 ignored, clean = [], []
867
865
868 compareworking = False
866 compareworking = False
869 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
867 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
870 compareworking = True
868 compareworking = True
871
869
872 if not compareworking:
870 if not compareworking:
873 # read the manifest from node1 before the manifest from node2,
871 # read the manifest from node1 before the manifest from node2,
874 # so that we'll hit the manifest cache if we're going through
872 # so that we'll hit the manifest cache if we're going through
875 # all the revisions in parent->child order.
873 # all the revisions in parent->child order.
876 mf1 = mfmatches(node1)
874 mf1 = mfmatches(node1)
877
875
878 # are we comparing the working directory?
876 # are we comparing the working directory?
879 if not node2:
877 if not node2:
880 (lookup, modified, added, removed, deleted, unknown,
878 (lookup, modified, added, removed, deleted, unknown,
881 ignored, clean) = self.dirstate.status(files, match,
879 ignored, clean) = self.dirstate.status(files, match,
882 list_ignored, list_clean)
880 list_ignored, list_clean)
883
881
884 # are we comparing working dir against its parent?
882 # are we comparing working dir against its parent?
885 if compareworking:
883 if compareworking:
886 if lookup:
884 if lookup:
887 fixup = []
885 fixup = []
888 # do a full compare of any files that might have changed
886 # do a full compare of any files that might have changed
889 ctx = self.changectx()
887 ctx = self.changectx()
890 for f in lookup:
888 for f in lookup:
891 if f not in ctx or ctx[f].cmp(self.wread(f)):
889 if f not in ctx or ctx[f].cmp(self.wread(f)):
892 modified.append(f)
890 modified.append(f)
893 else:
891 else:
894 fixup.append(f)
892 fixup.append(f)
895 if list_clean:
893 if list_clean:
896 clean.append(f)
894 clean.append(f)
897
895
898 # update dirstate for files that are actually clean
896 # update dirstate for files that are actually clean
899 if fixup:
897 if fixup:
900 cleanup = False
898 cleanup = False
901 if not wlock:
899 if not wlock:
902 try:
900 try:
903 wlock = self.wlock(wait=0)
901 wlock = self.wlock(wait=0)
904 cleanup = True
902 cleanup = True
905 except lock.LockException:
903 except lock.LockException:
906 pass
904 pass
907 if wlock:
905 if wlock:
908 for f in fixup:
906 for f in fixup:
909 self.dirstate.normal(f)
907 self.dirstate.normal(f)
910 if cleanup:
908 if cleanup:
911 wlock.release()
909 wlock.release()
912 else:
910 else:
913 # we are comparing working dir against non-parent
911 # we are comparing working dir against non-parent
914 # generate a pseudo-manifest for the working dir
912 # generate a pseudo-manifest for the working dir
915 # XXX: create it in dirstate.py ?
913 # XXX: create it in dirstate.py ?
916 mf2 = mfmatches(self.dirstate.parents()[0])
914 mf2 = mfmatches(self.dirstate.parents()[0])
917 is_exec = util.execfunc(self.root, mf2.execf)
915 is_exec = util.execfunc(self.root, mf2.execf)
918 is_link = util.linkfunc(self.root, mf2.linkf)
916 is_link = util.linkfunc(self.root, mf2.linkf)
919 for f in lookup + modified + added:
917 for f in lookup + modified + added:
920 mf2[f] = ""
918 mf2[f] = ""
921 mf2.set(f, is_exec(f), is_link(f))
919 mf2.set(f, is_exec(f), is_link(f))
922 for f in removed:
920 for f in removed:
923 if f in mf2:
921 if f in mf2:
924 del mf2[f]
922 del mf2[f]
925
923
926 else:
924 else:
927 # we are comparing two revisions
925 # we are comparing two revisions
928 mf2 = mfmatches(node2)
926 mf2 = mfmatches(node2)
929
927
930 if not compareworking:
928 if not compareworking:
931 # flush lists from dirstate before comparing manifests
929 # flush lists from dirstate before comparing manifests
932 modified, added, clean = [], [], []
930 modified, added, clean = [], [], []
933
931
934 # make sure to sort the files so we talk to the disk in a
932 # make sure to sort the files so we talk to the disk in a
935 # reasonable order
933 # reasonable order
936 mf2keys = mf2.keys()
934 mf2keys = mf2.keys()
937 mf2keys.sort()
935 mf2keys.sort()
938 getnode = lambda fn: mf1.get(fn, nullid)
936 getnode = lambda fn: mf1.get(fn, nullid)
939 for fn in mf2keys:
937 for fn in mf2keys:
940 if mf1.has_key(fn):
938 if mf1.has_key(fn):
941 if (mf1.flags(fn) != mf2.flags(fn) or
939 if (mf1.flags(fn) != mf2.flags(fn) or
942 (mf1[fn] != mf2[fn] and
940 (mf1[fn] != mf2[fn] and
943 (mf2[fn] != "" or fcmp(fn, getnode)))):
941 (mf2[fn] != "" or fcmp(fn, getnode)))):
944 modified.append(fn)
942 modified.append(fn)
945 elif list_clean:
943 elif list_clean:
946 clean.append(fn)
944 clean.append(fn)
947 del mf1[fn]
945 del mf1[fn]
948 else:
946 else:
949 added.append(fn)
947 added.append(fn)
950
948
951 removed = mf1.keys()
949 removed = mf1.keys()
952
950
953 # sort and return results:
951 # sort and return results:
954 for l in modified, added, removed, deleted, unknown, ignored, clean:
952 for l in modified, added, removed, deleted, unknown, ignored, clean:
955 l.sort()
953 l.sort()
956 return (modified, added, removed, deleted, unknown, ignored, clean)
954 return (modified, added, removed, deleted, unknown, ignored, clean)
957
955
958 def add(self, list, wlock=None):
956 def add(self, list, wlock=None):
959 if not wlock:
957 if not wlock:
960 wlock = self.wlock()
958 wlock = self.wlock()
961 for f in list:
959 for f in list:
962 p = self.wjoin(f)
960 p = self.wjoin(f)
963 try:
961 try:
964 st = os.lstat(p)
962 st = os.lstat(p)
965 except:
963 except:
966 self.ui.warn(_("%s does not exist!\n") % f)
964 self.ui.warn(_("%s does not exist!\n") % f)
967 continue
965 continue
968 if st.st_size > 10000000:
966 if st.st_size > 10000000:
969 self.ui.warn(_("%s: files over 10MB may cause memory and"
967 self.ui.warn(_("%s: files over 10MB may cause memory and"
970 " performance problems\n"
968 " performance problems\n"
971 "(use 'hg revert %s' to unadd the file)\n")
969 "(use 'hg revert %s' to unadd the file)\n")
972 % (f, f))
970 % (f, f))
973 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
971 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
974 self.ui.warn(_("%s not added: only files and symlinks "
972 self.ui.warn(_("%s not added: only files and symlinks "
975 "supported currently\n") % f)
973 "supported currently\n") % f)
976 elif self.dirstate[f] in 'an':
974 elif self.dirstate[f] in 'an':
977 self.ui.warn(_("%s already tracked!\n") % f)
975 self.ui.warn(_("%s already tracked!\n") % f)
978 else:
976 else:
979 self.dirstate.add(f)
977 self.dirstate.add(f)
980
978
981 def forget(self, list, wlock=None):
979 def forget(self, list, wlock=None):
982 if not wlock:
980 if not wlock:
983 wlock = self.wlock()
981 wlock = self.wlock()
984 for f in list:
982 for f in list:
985 if self.dirstate[f] != 'a':
983 if self.dirstate[f] != 'a':
986 self.ui.warn(_("%s not added!\n") % f)
984 self.ui.warn(_("%s not added!\n") % f)
987 else:
985 else:
988 self.dirstate.forget(f)
986 self.dirstate.forget(f)
989
987
990 def remove(self, list, unlink=False, wlock=None):
988 def remove(self, list, unlink=False, wlock=None):
991 if unlink:
989 if unlink:
992 for f in list:
990 for f in list:
993 try:
991 try:
994 util.unlink(self.wjoin(f))
992 util.unlink(self.wjoin(f))
995 except OSError, inst:
993 except OSError, inst:
996 if inst.errno != errno.ENOENT:
994 if inst.errno != errno.ENOENT:
997 raise
995 raise
998 if not wlock:
996 if not wlock:
999 wlock = self.wlock()
997 wlock = self.wlock()
1000 for f in list:
998 for f in list:
1001 if unlink and os.path.exists(self.wjoin(f)):
999 if unlink and os.path.exists(self.wjoin(f)):
1002 self.ui.warn(_("%s still exists!\n") % f)
1000 self.ui.warn(_("%s still exists!\n") % f)
1003 elif self.dirstate[f] == 'a':
1001 elif self.dirstate[f] == 'a':
1004 self.dirstate.forget(f)
1002 self.dirstate.forget(f)
1005 elif f not in self.dirstate:
1003 elif f not in self.dirstate:
1006 self.ui.warn(_("%s not tracked!\n") % f)
1004 self.ui.warn(_("%s not tracked!\n") % f)
1007 else:
1005 else:
1008 self.dirstate.remove(f)
1006 self.dirstate.remove(f)
1009
1007
1010 def undelete(self, list, wlock=None):
1008 def undelete(self, list, wlock=None):
1011 p = self.dirstate.parents()[0]
1009 p = self.dirstate.parents()[0]
1012 mn = self.changelog.read(p)[0]
1010 mn = self.changelog.read(p)[0]
1013 m = self.manifest.read(mn)
1011 m = self.manifest.read(mn)
1014 if not wlock:
1012 if not wlock:
1015 wlock = self.wlock()
1013 wlock = self.wlock()
1016 for f in list:
1014 for f in list:
1017 if self.dirstate[f] != 'r':
1015 if self.dirstate[f] != 'r':
1018 self.ui.warn("%s not removed!\n" % f)
1016 self.ui.warn("%s not removed!\n" % f)
1019 else:
1017 else:
1020 t = self.file(f).read(m[f])
1018 t = self.file(f).read(m[f])
1021 self.wwrite(f, t, m.flags(f))
1019 self.wwrite(f, t, m.flags(f))
1022 self.dirstate.normal(f)
1020 self.dirstate.normal(f)
1023
1021
1024 def copy(self, source, dest, wlock=None):
1022 def copy(self, source, dest, wlock=None):
1025 p = self.wjoin(dest)
1023 p = self.wjoin(dest)
1026 if not (os.path.exists(p) or os.path.islink(p)):
1024 if not (os.path.exists(p) or os.path.islink(p)):
1027 self.ui.warn(_("%s does not exist!\n") % dest)
1025 self.ui.warn(_("%s does not exist!\n") % dest)
1028 elif not (os.path.isfile(p) or os.path.islink(p)):
1026 elif not (os.path.isfile(p) or os.path.islink(p)):
1029 self.ui.warn(_("copy failed: %s is not a file or a "
1027 self.ui.warn(_("copy failed: %s is not a file or a "
1030 "symbolic link\n") % dest)
1028 "symbolic link\n") % dest)
1031 else:
1029 else:
1032 if not wlock:
1030 if not wlock:
1033 wlock = self.wlock()
1031 wlock = self.wlock()
1034 if dest not in self.dirstate:
1032 if dest not in self.dirstate:
1035 self.dirstate.add(dest)
1033 self.dirstate.add(dest)
1036 self.dirstate.copy(source, dest)
1034 self.dirstate.copy(source, dest)
1037
1035
1038 def heads(self, start=None):
1036 def heads(self, start=None):
1039 heads = self.changelog.heads(start)
1037 heads = self.changelog.heads(start)
1040 # sort the output in rev descending order
1038 # sort the output in rev descending order
1041 heads = [(-self.changelog.rev(h), h) for h in heads]
1039 heads = [(-self.changelog.rev(h), h) for h in heads]
1042 heads.sort()
1040 heads.sort()
1043 return [n for (r, n) in heads]
1041 return [n for (r, n) in heads]
1044
1042
1045 def branchheads(self, branch, start=None):
1043 def branchheads(self, branch, start=None):
1046 branches = self.branchtags()
1044 branches = self.branchtags()
1047 if branch not in branches:
1045 if branch not in branches:
1048 return []
1046 return []
1049 # The basic algorithm is this:
1047 # The basic algorithm is this:
1050 #
1048 #
1051 # Start from the branch tip since there are no later revisions that can
1049 # Start from the branch tip since there are no later revisions that can
1052 # possibly be in this branch, and the tip is a guaranteed head.
1050 # possibly be in this branch, and the tip is a guaranteed head.
1053 #
1051 #
1054 # Remember the tip's parents as the first ancestors, since these by
1052 # Remember the tip's parents as the first ancestors, since these by
1055 # definition are not heads.
1053 # definition are not heads.
1056 #
1054 #
1057 # Step backwards from the brach tip through all the revisions. We are
1055 # Step backwards from the brach tip through all the revisions. We are
1058 # guaranteed by the rules of Mercurial that we will now be visiting the
1056 # guaranteed by the rules of Mercurial that we will now be visiting the
1059 # nodes in reverse topological order (children before parents).
1057 # nodes in reverse topological order (children before parents).
1060 #
1058 #
1061 # If a revision is one of the ancestors of a head then we can toss it
1059 # If a revision is one of the ancestors of a head then we can toss it
1062 # out of the ancestors set (we've already found it and won't be
1060 # out of the ancestors set (we've already found it and won't be
1063 # visiting it again) and put its parents in the ancestors set.
1061 # visiting it again) and put its parents in the ancestors set.
1064 #
1062 #
1065 # Otherwise, if a revision is in the branch it's another head, since it
1063 # Otherwise, if a revision is in the branch it's another head, since it
1066 # wasn't in the ancestor list of an existing head. So add it to the
1064 # wasn't in the ancestor list of an existing head. So add it to the
1067 # head list, and add its parents to the ancestor list.
1065 # head list, and add its parents to the ancestor list.
1068 #
1066 #
1069 # If it is not in the branch ignore it.
1067 # If it is not in the branch ignore it.
1070 #
1068 #
1071 # Once we have a list of heads, use nodesbetween to filter out all the
1069 # Once we have a list of heads, use nodesbetween to filter out all the
1072 # heads that cannot be reached from startrev. There may be a more
1070 # heads that cannot be reached from startrev. There may be a more
1073 # efficient way to do this as part of the previous algorithm.
1071 # efficient way to do this as part of the previous algorithm.
1074
1072
1075 set = util.set
1073 set = util.set
1076 heads = [self.changelog.rev(branches[branch])]
1074 heads = [self.changelog.rev(branches[branch])]
1077 # Don't care if ancestors contains nullrev or not.
1075 # Don't care if ancestors contains nullrev or not.
1078 ancestors = set(self.changelog.parentrevs(heads[0]))
1076 ancestors = set(self.changelog.parentrevs(heads[0]))
1079 for rev in xrange(heads[0] - 1, nullrev, -1):
1077 for rev in xrange(heads[0] - 1, nullrev, -1):
1080 if rev in ancestors:
1078 if rev in ancestors:
1081 ancestors.update(self.changelog.parentrevs(rev))
1079 ancestors.update(self.changelog.parentrevs(rev))
1082 ancestors.remove(rev)
1080 ancestors.remove(rev)
1083 elif self.changectx(rev).branch() == branch:
1081 elif self.changectx(rev).branch() == branch:
1084 heads.append(rev)
1082 heads.append(rev)
1085 ancestors.update(self.changelog.parentrevs(rev))
1083 ancestors.update(self.changelog.parentrevs(rev))
1086 heads = [self.changelog.node(rev) for rev in heads]
1084 heads = [self.changelog.node(rev) for rev in heads]
1087 if start is not None:
1085 if start is not None:
1088 heads = self.changelog.nodesbetween([start], heads)[2]
1086 heads = self.changelog.nodesbetween([start], heads)[2]
1089 return heads
1087 return heads
1090
1088
1091 def branches(self, nodes):
1089 def branches(self, nodes):
1092 if not nodes:
1090 if not nodes:
1093 nodes = [self.changelog.tip()]
1091 nodes = [self.changelog.tip()]
1094 b = []
1092 b = []
1095 for n in nodes:
1093 for n in nodes:
1096 t = n
1094 t = n
1097 while 1:
1095 while 1:
1098 p = self.changelog.parents(n)
1096 p = self.changelog.parents(n)
1099 if p[1] != nullid or p[0] == nullid:
1097 if p[1] != nullid or p[0] == nullid:
1100 b.append((t, n, p[0], p[1]))
1098 b.append((t, n, p[0], p[1]))
1101 break
1099 break
1102 n = p[0]
1100 n = p[0]
1103 return b
1101 return b
1104
1102
1105 def between(self, pairs):
1103 def between(self, pairs):
1106 r = []
1104 r = []
1107
1105
1108 for top, bottom in pairs:
1106 for top, bottom in pairs:
1109 n, l, i = top, [], 0
1107 n, l, i = top, [], 0
1110 f = 1
1108 f = 1
1111
1109
1112 while n != bottom:
1110 while n != bottom:
1113 p = self.changelog.parents(n)[0]
1111 p = self.changelog.parents(n)[0]
1114 if i == f:
1112 if i == f:
1115 l.append(n)
1113 l.append(n)
1116 f = f * 2
1114 f = f * 2
1117 n = p
1115 n = p
1118 i += 1
1116 i += 1
1119
1117
1120 r.append(l)
1118 r.append(l)
1121
1119
1122 return r
1120 return r
1123
1121
1124 def findincoming(self, remote, base=None, heads=None, force=False):
1122 def findincoming(self, remote, base=None, heads=None, force=False):
1125 """Return list of roots of the subsets of missing nodes from remote
1123 """Return list of roots of the subsets of missing nodes from remote
1126
1124
1127 If base dict is specified, assume that these nodes and their parents
1125 If base dict is specified, assume that these nodes and their parents
1128 exist on the remote side and that no child of a node of base exists
1126 exist on the remote side and that no child of a node of base exists
1129 in both remote and self.
1127 in both remote and self.
1130 Furthermore base will be updated to include the nodes that exists
1128 Furthermore base will be updated to include the nodes that exists
1131 in self and remote but no children exists in self and remote.
1129 in self and remote but no children exists in self and remote.
1132 If a list of heads is specified, return only nodes which are heads
1130 If a list of heads is specified, return only nodes which are heads
1133 or ancestors of these heads.
1131 or ancestors of these heads.
1134
1132
1135 All the ancestors of base are in self and in remote.
1133 All the ancestors of base are in self and in remote.
1136 All the descendants of the list returned are missing in self.
1134 All the descendants of the list returned are missing in self.
1137 (and so we know that the rest of the nodes are missing in remote, see
1135 (and so we know that the rest of the nodes are missing in remote, see
1138 outgoing)
1136 outgoing)
1139 """
1137 """
1140 m = self.changelog.nodemap
1138 m = self.changelog.nodemap
1141 search = []
1139 search = []
1142 fetch = {}
1140 fetch = {}
1143 seen = {}
1141 seen = {}
1144 seenbranch = {}
1142 seenbranch = {}
1145 if base == None:
1143 if base == None:
1146 base = {}
1144 base = {}
1147
1145
1148 if not heads:
1146 if not heads:
1149 heads = remote.heads()
1147 heads = remote.heads()
1150
1148
1151 if self.changelog.tip() == nullid:
1149 if self.changelog.tip() == nullid:
1152 base[nullid] = 1
1150 base[nullid] = 1
1153 if heads != [nullid]:
1151 if heads != [nullid]:
1154 return [nullid]
1152 return [nullid]
1155 return []
1153 return []
1156
1154
1157 # assume we're closer to the tip than the root
1155 # assume we're closer to the tip than the root
1158 # and start by examining the heads
1156 # and start by examining the heads
1159 self.ui.status(_("searching for changes\n"))
1157 self.ui.status(_("searching for changes\n"))
1160
1158
1161 unknown = []
1159 unknown = []
1162 for h in heads:
1160 for h in heads:
1163 if h not in m:
1161 if h not in m:
1164 unknown.append(h)
1162 unknown.append(h)
1165 else:
1163 else:
1166 base[h] = 1
1164 base[h] = 1
1167
1165
1168 if not unknown:
1166 if not unknown:
1169 return []
1167 return []
1170
1168
1171 req = dict.fromkeys(unknown)
1169 req = dict.fromkeys(unknown)
1172 reqcnt = 0
1170 reqcnt = 0
1173
1171
1174 # search through remote branches
1172 # search through remote branches
1175 # a 'branch' here is a linear segment of history, with four parts:
1173 # a 'branch' here is a linear segment of history, with four parts:
1176 # head, root, first parent, second parent
1174 # head, root, first parent, second parent
1177 # (a branch always has two parents (or none) by definition)
1175 # (a branch always has two parents (or none) by definition)
1178 unknown = remote.branches(unknown)
1176 unknown = remote.branches(unknown)
1179 while unknown:
1177 while unknown:
1180 r = []
1178 r = []
1181 while unknown:
1179 while unknown:
1182 n = unknown.pop(0)
1180 n = unknown.pop(0)
1183 if n[0] in seen:
1181 if n[0] in seen:
1184 continue
1182 continue
1185
1183
1186 self.ui.debug(_("examining %s:%s\n")
1184 self.ui.debug(_("examining %s:%s\n")
1187 % (short(n[0]), short(n[1])))
1185 % (short(n[0]), short(n[1])))
1188 if n[0] == nullid: # found the end of the branch
1186 if n[0] == nullid: # found the end of the branch
1189 pass
1187 pass
1190 elif n in seenbranch:
1188 elif n in seenbranch:
1191 self.ui.debug(_("branch already found\n"))
1189 self.ui.debug(_("branch already found\n"))
1192 continue
1190 continue
1193 elif n[1] and n[1] in m: # do we know the base?
1191 elif n[1] and n[1] in m: # do we know the base?
1194 self.ui.debug(_("found incomplete branch %s:%s\n")
1192 self.ui.debug(_("found incomplete branch %s:%s\n")
1195 % (short(n[0]), short(n[1])))
1193 % (short(n[0]), short(n[1])))
1196 search.append(n) # schedule branch range for scanning
1194 search.append(n) # schedule branch range for scanning
1197 seenbranch[n] = 1
1195 seenbranch[n] = 1
1198 else:
1196 else:
1199 if n[1] not in seen and n[1] not in fetch:
1197 if n[1] not in seen and n[1] not in fetch:
1200 if n[2] in m and n[3] in m:
1198 if n[2] in m and n[3] in m:
1201 self.ui.debug(_("found new changeset %s\n") %
1199 self.ui.debug(_("found new changeset %s\n") %
1202 short(n[1]))
1200 short(n[1]))
1203 fetch[n[1]] = 1 # earliest unknown
1201 fetch[n[1]] = 1 # earliest unknown
1204 for p in n[2:4]:
1202 for p in n[2:4]:
1205 if p in m:
1203 if p in m:
1206 base[p] = 1 # latest known
1204 base[p] = 1 # latest known
1207
1205
1208 for p in n[2:4]:
1206 for p in n[2:4]:
1209 if p not in req and p not in m:
1207 if p not in req and p not in m:
1210 r.append(p)
1208 r.append(p)
1211 req[p] = 1
1209 req[p] = 1
1212 seen[n[0]] = 1
1210 seen[n[0]] = 1
1213
1211
1214 if r:
1212 if r:
1215 reqcnt += 1
1213 reqcnt += 1
1216 self.ui.debug(_("request %d: %s\n") %
1214 self.ui.debug(_("request %d: %s\n") %
1217 (reqcnt, " ".join(map(short, r))))
1215 (reqcnt, " ".join(map(short, r))))
1218 for p in xrange(0, len(r), 10):
1216 for p in xrange(0, len(r), 10):
1219 for b in remote.branches(r[p:p+10]):
1217 for b in remote.branches(r[p:p+10]):
1220 self.ui.debug(_("received %s:%s\n") %
1218 self.ui.debug(_("received %s:%s\n") %
1221 (short(b[0]), short(b[1])))
1219 (short(b[0]), short(b[1])))
1222 unknown.append(b)
1220 unknown.append(b)
1223
1221
1224 # do binary search on the branches we found
1222 # do binary search on the branches we found
1225 while search:
1223 while search:
1226 n = search.pop(0)
1224 n = search.pop(0)
1227 reqcnt += 1
1225 reqcnt += 1
1228 l = remote.between([(n[0], n[1])])[0]
1226 l = remote.between([(n[0], n[1])])[0]
1229 l.append(n[1])
1227 l.append(n[1])
1230 p = n[0]
1228 p = n[0]
1231 f = 1
1229 f = 1
1232 for i in l:
1230 for i in l:
1233 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1231 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1234 if i in m:
1232 if i in m:
1235 if f <= 2:
1233 if f <= 2:
1236 self.ui.debug(_("found new branch changeset %s\n") %
1234 self.ui.debug(_("found new branch changeset %s\n") %
1237 short(p))
1235 short(p))
1238 fetch[p] = 1
1236 fetch[p] = 1
1239 base[i] = 1
1237 base[i] = 1
1240 else:
1238 else:
1241 self.ui.debug(_("narrowed branch search to %s:%s\n")
1239 self.ui.debug(_("narrowed branch search to %s:%s\n")
1242 % (short(p), short(i)))
1240 % (short(p), short(i)))
1243 search.append((p, i))
1241 search.append((p, i))
1244 break
1242 break
1245 p, f = i, f * 2
1243 p, f = i, f * 2
1246
1244
1247 # sanity check our fetch list
1245 # sanity check our fetch list
1248 for f in fetch.keys():
1246 for f in fetch.keys():
1249 if f in m:
1247 if f in m:
1250 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1248 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1251
1249
1252 if base.keys() == [nullid]:
1250 if base.keys() == [nullid]:
1253 if force:
1251 if force:
1254 self.ui.warn(_("warning: repository is unrelated\n"))
1252 self.ui.warn(_("warning: repository is unrelated\n"))
1255 else:
1253 else:
1256 raise util.Abort(_("repository is unrelated"))
1254 raise util.Abort(_("repository is unrelated"))
1257
1255
1258 self.ui.debug(_("found new changesets starting at ") +
1256 self.ui.debug(_("found new changesets starting at ") +
1259 " ".join([short(f) for f in fetch]) + "\n")
1257 " ".join([short(f) for f in fetch]) + "\n")
1260
1258
1261 self.ui.debug(_("%d total queries\n") % reqcnt)
1259 self.ui.debug(_("%d total queries\n") % reqcnt)
1262
1260
1263 return fetch.keys()
1261 return fetch.keys()
1264
1262
1265 def findoutgoing(self, remote, base=None, heads=None, force=False):
1263 def findoutgoing(self, remote, base=None, heads=None, force=False):
1266 """Return list of nodes that are roots of subsets not in remote
1264 """Return list of nodes that are roots of subsets not in remote
1267
1265
1268 If base dict is specified, assume that these nodes and their parents
1266 If base dict is specified, assume that these nodes and their parents
1269 exist on the remote side.
1267 exist on the remote side.
1270 If a list of heads is specified, return only nodes which are heads
1268 If a list of heads is specified, return only nodes which are heads
1271 or ancestors of these heads, and return a second element which
1269 or ancestors of these heads, and return a second element which
1272 contains all remote heads which get new children.
1270 contains all remote heads which get new children.
1273 """
1271 """
1274 if base == None:
1272 if base == None:
1275 base = {}
1273 base = {}
1276 self.findincoming(remote, base, heads, force=force)
1274 self.findincoming(remote, base, heads, force=force)
1277
1275
1278 self.ui.debug(_("common changesets up to ")
1276 self.ui.debug(_("common changesets up to ")
1279 + " ".join(map(short, base.keys())) + "\n")
1277 + " ".join(map(short, base.keys())) + "\n")
1280
1278
1281 remain = dict.fromkeys(self.changelog.nodemap)
1279 remain = dict.fromkeys(self.changelog.nodemap)
1282
1280
1283 # prune everything remote has from the tree
1281 # prune everything remote has from the tree
1284 del remain[nullid]
1282 del remain[nullid]
1285 remove = base.keys()
1283 remove = base.keys()
1286 while remove:
1284 while remove:
1287 n = remove.pop(0)
1285 n = remove.pop(0)
1288 if n in remain:
1286 if n in remain:
1289 del remain[n]
1287 del remain[n]
1290 for p in self.changelog.parents(n):
1288 for p in self.changelog.parents(n):
1291 remove.append(p)
1289 remove.append(p)
1292
1290
1293 # find every node whose parents have been pruned
1291 # find every node whose parents have been pruned
1294 subset = []
1292 subset = []
1295 # find every remote head that will get new children
1293 # find every remote head that will get new children
1296 updated_heads = {}
1294 updated_heads = {}
1297 for n in remain:
1295 for n in remain:
1298 p1, p2 = self.changelog.parents(n)
1296 p1, p2 = self.changelog.parents(n)
1299 if p1 not in remain and p2 not in remain:
1297 if p1 not in remain and p2 not in remain:
1300 subset.append(n)
1298 subset.append(n)
1301 if heads:
1299 if heads:
1302 if p1 in heads:
1300 if p1 in heads:
1303 updated_heads[p1] = True
1301 updated_heads[p1] = True
1304 if p2 in heads:
1302 if p2 in heads:
1305 updated_heads[p2] = True
1303 updated_heads[p2] = True
1306
1304
1307 # this is the set of all roots we have to push
1305 # this is the set of all roots we have to push
1308 if heads:
1306 if heads:
1309 return subset, updated_heads.keys()
1307 return subset, updated_heads.keys()
1310 else:
1308 else:
1311 return subset
1309 return subset
1312
1310
1313 def pull(self, remote, heads=None, force=False, lock=None):
1311 def pull(self, remote, heads=None, force=False, lock=None):
1314 mylock = False
1312 mylock = False
1315 if not lock:
1313 if not lock:
1316 lock = self.lock()
1314 lock = self.lock()
1317 mylock = True
1315 mylock = True
1318
1316
1319 try:
1317 try:
1320 fetch = self.findincoming(remote, force=force)
1318 fetch = self.findincoming(remote, force=force)
1321 if fetch == [nullid]:
1319 if fetch == [nullid]:
1322 self.ui.status(_("requesting all changes\n"))
1320 self.ui.status(_("requesting all changes\n"))
1323
1321
1324 if not fetch:
1322 if not fetch:
1325 self.ui.status(_("no changes found\n"))
1323 self.ui.status(_("no changes found\n"))
1326 return 0
1324 return 0
1327
1325
1328 if heads is None:
1326 if heads is None:
1329 cg = remote.changegroup(fetch, 'pull')
1327 cg = remote.changegroup(fetch, 'pull')
1330 else:
1328 else:
1331 if 'changegroupsubset' not in remote.capabilities:
1329 if 'changegroupsubset' not in remote.capabilities:
1332 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1330 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1333 cg = remote.changegroupsubset(fetch, heads, 'pull')
1331 cg = remote.changegroupsubset(fetch, heads, 'pull')
1334 return self.addchangegroup(cg, 'pull', remote.url())
1332 return self.addchangegroup(cg, 'pull', remote.url())
1335 finally:
1333 finally:
1336 if mylock:
1334 if mylock:
1337 lock.release()
1335 lock.release()
1338
1336
1339 def push(self, remote, force=False, revs=None):
1337 def push(self, remote, force=False, revs=None):
1340 # there are two ways to push to remote repo:
1338 # there are two ways to push to remote repo:
1341 #
1339 #
1342 # addchangegroup assumes local user can lock remote
1340 # addchangegroup assumes local user can lock remote
1343 # repo (local filesystem, old ssh servers).
1341 # repo (local filesystem, old ssh servers).
1344 #
1342 #
1345 # unbundle assumes local user cannot lock remote repo (new ssh
1343 # unbundle assumes local user cannot lock remote repo (new ssh
1346 # servers, http servers).
1344 # servers, http servers).
1347
1345
1348 if remote.capable('unbundle'):
1346 if remote.capable('unbundle'):
1349 return self.push_unbundle(remote, force, revs)
1347 return self.push_unbundle(remote, force, revs)
1350 return self.push_addchangegroup(remote, force, revs)
1348 return self.push_addchangegroup(remote, force, revs)
1351
1349
1352 def prepush(self, remote, force, revs):
1350 def prepush(self, remote, force, revs):
1353 base = {}
1351 base = {}
1354 remote_heads = remote.heads()
1352 remote_heads = remote.heads()
1355 inc = self.findincoming(remote, base, remote_heads, force=force)
1353 inc = self.findincoming(remote, base, remote_heads, force=force)
1356
1354
1357 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1355 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1358 if revs is not None:
1356 if revs is not None:
1359 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1357 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1360 else:
1358 else:
1361 bases, heads = update, self.changelog.heads()
1359 bases, heads = update, self.changelog.heads()
1362
1360
1363 if not bases:
1361 if not bases:
1364 self.ui.status(_("no changes found\n"))
1362 self.ui.status(_("no changes found\n"))
1365 return None, 1
1363 return None, 1
1366 elif not force:
1364 elif not force:
1367 # check if we're creating new remote heads
1365 # check if we're creating new remote heads
1368 # to be a remote head after push, node must be either
1366 # to be a remote head after push, node must be either
1369 # - unknown locally
1367 # - unknown locally
1370 # - a local outgoing head descended from update
1368 # - a local outgoing head descended from update
1371 # - a remote head that's known locally and not
1369 # - a remote head that's known locally and not
1372 # ancestral to an outgoing head
1370 # ancestral to an outgoing head
1373
1371
1374 warn = 0
1372 warn = 0
1375
1373
1376 if remote_heads == [nullid]:
1374 if remote_heads == [nullid]:
1377 warn = 0
1375 warn = 0
1378 elif not revs and len(heads) > len(remote_heads):
1376 elif not revs and len(heads) > len(remote_heads):
1379 warn = 1
1377 warn = 1
1380 else:
1378 else:
1381 newheads = list(heads)
1379 newheads = list(heads)
1382 for r in remote_heads:
1380 for r in remote_heads:
1383 if r in self.changelog.nodemap:
1381 if r in self.changelog.nodemap:
1384 desc = self.changelog.heads(r, heads)
1382 desc = self.changelog.heads(r, heads)
1385 l = [h for h in heads if h in desc]
1383 l = [h for h in heads if h in desc]
1386 if not l:
1384 if not l:
1387 newheads.append(r)
1385 newheads.append(r)
1388 else:
1386 else:
1389 newheads.append(r)
1387 newheads.append(r)
1390 if len(newheads) > len(remote_heads):
1388 if len(newheads) > len(remote_heads):
1391 warn = 1
1389 warn = 1
1392
1390
1393 if warn:
1391 if warn:
1394 self.ui.warn(_("abort: push creates new remote branches!\n"))
1392 self.ui.warn(_("abort: push creates new remote branches!\n"))
1395 self.ui.status(_("(did you forget to merge?"
1393 self.ui.status(_("(did you forget to merge?"
1396 " use push -f to force)\n"))
1394 " use push -f to force)\n"))
1397 return None, 1
1395 return None, 1
1398 elif inc:
1396 elif inc:
1399 self.ui.warn(_("note: unsynced remote changes!\n"))
1397 self.ui.warn(_("note: unsynced remote changes!\n"))
1400
1398
1401
1399
1402 if revs is None:
1400 if revs is None:
1403 cg = self.changegroup(update, 'push')
1401 cg = self.changegroup(update, 'push')
1404 else:
1402 else:
1405 cg = self.changegroupsubset(update, revs, 'push')
1403 cg = self.changegroupsubset(update, revs, 'push')
1406 return cg, remote_heads
1404 return cg, remote_heads
1407
1405
1408 def push_addchangegroup(self, remote, force, revs):
1406 def push_addchangegroup(self, remote, force, revs):
1409 lock = remote.lock()
1407 lock = remote.lock()
1410
1408
1411 ret = self.prepush(remote, force, revs)
1409 ret = self.prepush(remote, force, revs)
1412 if ret[0] is not None:
1410 if ret[0] is not None:
1413 cg, remote_heads = ret
1411 cg, remote_heads = ret
1414 return remote.addchangegroup(cg, 'push', self.url())
1412 return remote.addchangegroup(cg, 'push', self.url())
1415 return ret[1]
1413 return ret[1]
1416
1414
1417 def push_unbundle(self, remote, force, revs):
1415 def push_unbundle(self, remote, force, revs):
1418 # local repo finds heads on server, finds out what revs it
1416 # local repo finds heads on server, finds out what revs it
1419 # must push. once revs transferred, if server finds it has
1417 # must push. once revs transferred, if server finds it has
1420 # different heads (someone else won commit/push race), server
1418 # different heads (someone else won commit/push race), server
1421 # aborts.
1419 # aborts.
1422
1420
1423 ret = self.prepush(remote, force, revs)
1421 ret = self.prepush(remote, force, revs)
1424 if ret[0] is not None:
1422 if ret[0] is not None:
1425 cg, remote_heads = ret
1423 cg, remote_heads = ret
1426 if force: remote_heads = ['force']
1424 if force: remote_heads = ['force']
1427 return remote.unbundle(cg, remote_heads, 'push')
1425 return remote.unbundle(cg, remote_heads, 'push')
1428 return ret[1]
1426 return ret[1]
1429
1427
1430 def changegroupinfo(self, nodes):
1428 def changegroupinfo(self, nodes):
1431 self.ui.note(_("%d changesets found\n") % len(nodes))
1429 self.ui.note(_("%d changesets found\n") % len(nodes))
1432 if self.ui.debugflag:
1430 if self.ui.debugflag:
1433 self.ui.debug(_("List of changesets:\n"))
1431 self.ui.debug(_("List of changesets:\n"))
1434 for node in nodes:
1432 for node in nodes:
1435 self.ui.debug("%s\n" % hex(node))
1433 self.ui.debug("%s\n" % hex(node))
1436
1434
1437 def changegroupsubset(self, bases, heads, source):
1435 def changegroupsubset(self, bases, heads, source):
1438 """This function generates a changegroup consisting of all the nodes
1436 """This function generates a changegroup consisting of all the nodes
1439 that are descendents of any of the bases, and ancestors of any of
1437 that are descendents of any of the bases, and ancestors of any of
1440 the heads.
1438 the heads.
1441
1439
1442 It is fairly complex as determining which filenodes and which
1440 It is fairly complex as determining which filenodes and which
1443 manifest nodes need to be included for the changeset to be complete
1441 manifest nodes need to be included for the changeset to be complete
1444 is non-trivial.
1442 is non-trivial.
1445
1443
1446 Another wrinkle is doing the reverse, figuring out which changeset in
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1447 the changegroup a particular filenode or manifestnode belongs to."""
1445 the changegroup a particular filenode or manifestnode belongs to."""
1448
1446
1449 self.hook('preoutgoing', throw=True, source=source)
1447 self.hook('preoutgoing', throw=True, source=source)
1450
1448
1451 # Set up some initial variables
1449 # Set up some initial variables
1452 # Make it easy to refer to self.changelog
1450 # Make it easy to refer to self.changelog
1453 cl = self.changelog
1451 cl = self.changelog
1454 # msng is short for missing - compute the list of changesets in this
1452 # msng is short for missing - compute the list of changesets in this
1455 # changegroup.
1453 # changegroup.
1456 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1454 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1457 self.changegroupinfo(msng_cl_lst)
1455 self.changegroupinfo(msng_cl_lst)
1458 # Some bases may turn out to be superfluous, and some heads may be
1456 # Some bases may turn out to be superfluous, and some heads may be
1459 # too. nodesbetween will return the minimal set of bases and heads
1457 # too. nodesbetween will return the minimal set of bases and heads
1460 # necessary to re-create the changegroup.
1458 # necessary to re-create the changegroup.
1461
1459
1462 # Known heads are the list of heads that it is assumed the recipient
1460 # Known heads are the list of heads that it is assumed the recipient
1463 # of this changegroup will know about.
1461 # of this changegroup will know about.
1464 knownheads = {}
1462 knownheads = {}
1465 # We assume that all parents of bases are known heads.
1463 # We assume that all parents of bases are known heads.
1466 for n in bases:
1464 for n in bases:
1467 for p in cl.parents(n):
1465 for p in cl.parents(n):
1468 if p != nullid:
1466 if p != nullid:
1469 knownheads[p] = 1
1467 knownheads[p] = 1
1470 knownheads = knownheads.keys()
1468 knownheads = knownheads.keys()
1471 if knownheads:
1469 if knownheads:
1472 # Now that we know what heads are known, we can compute which
1470 # Now that we know what heads are known, we can compute which
1473 # changesets are known. The recipient must know about all
1471 # changesets are known. The recipient must know about all
1474 # changesets required to reach the known heads from the null
1472 # changesets required to reach the known heads from the null
1475 # changeset.
1473 # changeset.
1476 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1474 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1477 junk = None
1475 junk = None
1478 # Transform the list into an ersatz set.
1476 # Transform the list into an ersatz set.
1479 has_cl_set = dict.fromkeys(has_cl_set)
1477 has_cl_set = dict.fromkeys(has_cl_set)
1480 else:
1478 else:
1481 # If there were no known heads, the recipient cannot be assumed to
1479 # If there were no known heads, the recipient cannot be assumed to
1482 # know about any changesets.
1480 # know about any changesets.
1483 has_cl_set = {}
1481 has_cl_set = {}
1484
1482
1485 # Make it easy to refer to self.manifest
1483 # Make it easy to refer to self.manifest
1486 mnfst = self.manifest
1484 mnfst = self.manifest
1487 # We don't know which manifests are missing yet
1485 # We don't know which manifests are missing yet
1488 msng_mnfst_set = {}
1486 msng_mnfst_set = {}
1489 # Nor do we know which filenodes are missing.
1487 # Nor do we know which filenodes are missing.
1490 msng_filenode_set = {}
1488 msng_filenode_set = {}
1491
1489
1492 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1490 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1493 junk = None
1491 junk = None
1494
1492
1495 # A changeset always belongs to itself, so the changenode lookup
1493 # A changeset always belongs to itself, so the changenode lookup
1496 # function for a changenode is identity.
1494 # function for a changenode is identity.
1497 def identity(x):
1495 def identity(x):
1498 return x
1496 return x
1499
1497
1500 # A function generating function. Sets up an environment for the
1498 # A function generating function. Sets up an environment for the
1501 # inner function.
1499 # inner function.
1502 def cmp_by_rev_func(revlog):
1500 def cmp_by_rev_func(revlog):
1503 # Compare two nodes by their revision number in the environment's
1501 # Compare two nodes by their revision number in the environment's
1504 # revision history. Since the revision number both represents the
1502 # revision history. Since the revision number both represents the
1505 # most efficient order to read the nodes in, and represents a
1503 # most efficient order to read the nodes in, and represents a
1506 # topological sorting of the nodes, this function is often useful.
1504 # topological sorting of the nodes, this function is often useful.
1507 def cmp_by_rev(a, b):
1505 def cmp_by_rev(a, b):
1508 return cmp(revlog.rev(a), revlog.rev(b))
1506 return cmp(revlog.rev(a), revlog.rev(b))
1509 return cmp_by_rev
1507 return cmp_by_rev
1510
1508
1511 # If we determine that a particular file or manifest node must be a
1509 # If we determine that a particular file or manifest node must be a
1512 # node that the recipient of the changegroup will already have, we can
1510 # node that the recipient of the changegroup will already have, we can
1513 # also assume the recipient will have all the parents. This function
1511 # also assume the recipient will have all the parents. This function
1514 # prunes them from the set of missing nodes.
1512 # prunes them from the set of missing nodes.
1515 def prune_parents(revlog, hasset, msngset):
1513 def prune_parents(revlog, hasset, msngset):
1516 haslst = hasset.keys()
1514 haslst = hasset.keys()
1517 haslst.sort(cmp_by_rev_func(revlog))
1515 haslst.sort(cmp_by_rev_func(revlog))
1518 for node in haslst:
1516 for node in haslst:
1519 parentlst = [p for p in revlog.parents(node) if p != nullid]
1517 parentlst = [p for p in revlog.parents(node) if p != nullid]
1520 while parentlst:
1518 while parentlst:
1521 n = parentlst.pop()
1519 n = parentlst.pop()
1522 if n not in hasset:
1520 if n not in hasset:
1523 hasset[n] = 1
1521 hasset[n] = 1
1524 p = [p for p in revlog.parents(n) if p != nullid]
1522 p = [p for p in revlog.parents(n) if p != nullid]
1525 parentlst.extend(p)
1523 parentlst.extend(p)
1526 for n in hasset:
1524 for n in hasset:
1527 msngset.pop(n, None)
1525 msngset.pop(n, None)
1528
1526
1529 # This is a function generating function used to set up an environment
1527 # This is a function generating function used to set up an environment
1530 # for the inner function to execute in.
1528 # for the inner function to execute in.
1531 def manifest_and_file_collector(changedfileset):
1529 def manifest_and_file_collector(changedfileset):
1532 # This is an information gathering function that gathers
1530 # This is an information gathering function that gathers
1533 # information from each changeset node that goes out as part of
1531 # information from each changeset node that goes out as part of
1534 # the changegroup. The information gathered is a list of which
1532 # the changegroup. The information gathered is a list of which
1535 # manifest nodes are potentially required (the recipient may
1533 # manifest nodes are potentially required (the recipient may
1536 # already have them) and total list of all files which were
1534 # already have them) and total list of all files which were
1537 # changed in any changeset in the changegroup.
1535 # changed in any changeset in the changegroup.
1538 #
1536 #
1539 # We also remember the first changenode we saw any manifest
1537 # We also remember the first changenode we saw any manifest
1540 # referenced by so we can later determine which changenode 'owns'
1538 # referenced by so we can later determine which changenode 'owns'
1541 # the manifest.
1539 # the manifest.
1542 def collect_manifests_and_files(clnode):
1540 def collect_manifests_and_files(clnode):
1543 c = cl.read(clnode)
1541 c = cl.read(clnode)
1544 for f in c[3]:
1542 for f in c[3]:
1545 # This is to make sure we only have one instance of each
1543 # This is to make sure we only have one instance of each
1546 # filename string for each filename.
1544 # filename string for each filename.
1547 changedfileset.setdefault(f, f)
1545 changedfileset.setdefault(f, f)
1548 msng_mnfst_set.setdefault(c[0], clnode)
1546 msng_mnfst_set.setdefault(c[0], clnode)
1549 return collect_manifests_and_files
1547 return collect_manifests_and_files
1550
1548
1551 # Figure out which manifest nodes (of the ones we think might be part
1549 # Figure out which manifest nodes (of the ones we think might be part
1552 # of the changegroup) the recipient must know about and remove them
1550 # of the changegroup) the recipient must know about and remove them
1553 # from the changegroup.
1551 # from the changegroup.
1554 def prune_manifests():
1552 def prune_manifests():
1555 has_mnfst_set = {}
1553 has_mnfst_set = {}
1556 for n in msng_mnfst_set:
1554 for n in msng_mnfst_set:
1557 # If a 'missing' manifest thinks it belongs to a changenode
1555 # If a 'missing' manifest thinks it belongs to a changenode
1558 # the recipient is assumed to have, obviously the recipient
1556 # the recipient is assumed to have, obviously the recipient
1559 # must have that manifest.
1557 # must have that manifest.
1560 linknode = cl.node(mnfst.linkrev(n))
1558 linknode = cl.node(mnfst.linkrev(n))
1561 if linknode in has_cl_set:
1559 if linknode in has_cl_set:
1562 has_mnfst_set[n] = 1
1560 has_mnfst_set[n] = 1
1563 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1561 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1564
1562
        # Use the information collected in collect_manifests_and_files to
        # say which changenode any manifestnode belongs to.  Raises
        # KeyError for manifest nodes that were not collected.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]
1569
1567
        # A function generating function that sets up the initial
        # environment for the inner function.
        def filenode_collector(changedfiles):
            # One-element list so the inner function can rebind the value
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        # manifest entries store the node as 40 hex chars
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                    else:
                        # Otherwise we need a full manifest.
                        m = mnfst.read(mnfstnode)
                        # For every file we care about.
                        for f in changedfiles:
                            fnode = m.get(f, None)
                            # If it's in the manifest
                            if fnode is not None:
                                # See comments above.
                                clnode = msng_mnfst_set[mnfstnode]
                                ndset = msng_filenode_set.setdefault(f, {})
                                ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1619
1617
        # We have a list of filenodes we think we need for a file, lets
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            # Remove the known nodes (and their ancestors) from msngset
            # in place.
            prune_parents(filerevlog, hasset, msngset)
1633
1631
        # A function generator function that sets up the context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            # Bind the per-file mapping once so each lookup is a plain
            # dict access.
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
1642
1640
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            """Yield the wire chunks of the changegroup: changelog chunks,
            then manifest chunks, then one group per changed file, then a
            final close chunk."""
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # A file group is the filename chunk followed by deltas.
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1703
1701
1704 if msng_cl_lst:
1702 if msng_cl_lst:
1705 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1703 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1706
1704
1707 return util.chunkbuffer(gengroup())
1705 return util.chunkbuffer(gengroup())
1708
1706
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes the recipient already has; everything
        between those and our heads is sent.
        source: operation name passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer wrapping a lazy chunk generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All changelog nodes the recipient is missing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Dict used as a set of outgoing changelog revision numbers, for
        # fast membership tests in gennodelst below.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # Changelog nodes are their own link nodes.
            return x

        def gennodelst(revlog):
            # Yield the nodes of *revlog* whose linked changeset is part
            # of the outgoing set, in revision order.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that records every file touched by a
            # changeset as its chunk is generated.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the changeset's file list.
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a lookup callback mapping a node of *revlog* to the
            # changelog node that introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test for emptiness before emitting
                # the filename chunk.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Empty chunk marking the end of the stream.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1775
1773
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: file-like object carrying the changegroup stream.
        srctype/url: origin description passed through to the hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # Link revision for an incoming changeset is its own (future)
            # revision number.
            return cl.count()

        def revmap(x):
            # Map a changelog node to its revision number (link revision
            # for manifest/file revlog entries).
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        # cor/cnr: last changelog revision before/after the add.
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # An empty chunk (falsy) terminates the list of file groups.
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # Fired while the transaction is still open so a hook failure
            # can veto the whole group.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook invocation per added changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1872
1870
1873
1871
1874 def stream_in(self, remote):
1872 def stream_in(self, remote):
1875 fp = remote.stream_out()
1873 fp = remote.stream_out()
1876 l = fp.readline()
1874 l = fp.readline()
1877 try:
1875 try:
1878 resp = int(l)
1876 resp = int(l)
1879 except ValueError:
1877 except ValueError:
1880 raise util.UnexpectedOutput(
1878 raise util.UnexpectedOutput(
1881 _('Unexpected response from remote server:'), l)
1879 _('Unexpected response from remote server:'), l)
1882 if resp == 1:
1880 if resp == 1:
1883 raise util.Abort(_('operation forbidden by server'))
1881 raise util.Abort(_('operation forbidden by server'))
1884 elif resp == 2:
1882 elif resp == 2:
1885 raise util.Abort(_('locking the remote repository failed'))
1883 raise util.Abort(_('locking the remote repository failed'))
1886 elif resp != 0:
1884 elif resp != 0:
1887 raise util.Abort(_('the server sent an unknown error code'))
1885 raise util.Abort(_('the server sent an unknown error code'))
1888 self.ui.status(_('streaming all changes\n'))
1886 self.ui.status(_('streaming all changes\n'))
1889 l = fp.readline()
1887 l = fp.readline()
1890 try:
1888 try:
1891 total_files, total_bytes = map(int, l.split(' ', 1))
1889 total_files, total_bytes = map(int, l.split(' ', 1))
1892 except ValueError, TypeError:
1890 except ValueError, TypeError:
1893 raise util.UnexpectedOutput(
1891 raise util.UnexpectedOutput(
1894 _('Unexpected response from remote server:'), l)
1892 _('Unexpected response from remote server:'), l)
1895 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 self.ui.status(_('%d files to transfer, %s of data\n') %
1896 (total_files, util.bytecount(total_bytes)))
1894 (total_files, util.bytecount(total_bytes)))
1897 start = time.time()
1895 start = time.time()
1898 for i in xrange(total_files):
1896 for i in xrange(total_files):
1899 # XXX doesn't support '\n' or '\r' in filenames
1897 # XXX doesn't support '\n' or '\r' in filenames
1900 l = fp.readline()
1898 l = fp.readline()
1901 try:
1899 try:
1902 name, size = l.split('\0', 1)
1900 name, size = l.split('\0', 1)
1903 size = int(size)
1901 size = int(size)
1904 except ValueError, TypeError:
1902 except ValueError, TypeError:
1905 raise util.UnexpectedOutput(
1903 raise util.UnexpectedOutput(
1906 _('Unexpected response from remote server:'), l)
1904 _('Unexpected response from remote server:'), l)
1907 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1908 ofp = self.sopener(name, 'w')
1906 ofp = self.sopener(name, 'w')
1909 for chunk in util.filechunkiter(fp, limit=size):
1907 for chunk in util.filechunkiter(fp, limit=size):
1910 ofp.write(chunk)
1908 ofp.write(chunk)
1911 ofp.close()
1909 ofp.close()
1912 elapsed = time.time() - start
1910 elapsed = time.time() - start
1913 if elapsed <= 0:
1911 if elapsed <= 0:
1914 elapsed = 0.001
1912 elapsed = 0.001
1915 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1913 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1916 (util.bytecount(total_bytes), elapsed,
1914 (util.bytecount(total_bytes), elapsed,
1917 util.bytecount(total_bytes / elapsed)))
1915 util.bytecount(total_bytes / elapsed)))
1918 self.invalidate()
1916 self.invalidate()
1919 return len(self.heads()) + 1
1917 return len(self.heads()) + 1
1920
1918
1921 def clone(self, remote, heads=[], stream=False):
1919 def clone(self, remote, heads=[], stream=False):
1922 '''clone remote repository.
1920 '''clone remote repository.
1923
1921
1924 keyword arguments:
1922 keyword arguments:
1925 heads: list of revs to clone (forces use of pull)
1923 heads: list of revs to clone (forces use of pull)
1926 stream: use streaming clone if possible'''
1924 stream: use streaming clone if possible'''
1927
1925
1928 # now, all clients that can request uncompressed clones can
1926 # now, all clients that can request uncompressed clones can
1929 # read repo formats supported by all servers that can serve
1927 # read repo formats supported by all servers that can serve
1930 # them.
1928 # them.
1931
1929
1932 # if revlog format changes, client will have to check version
1930 # if revlog format changes, client will have to check version
1933 # and format flags on "stream" capability, and use
1931 # and format flags on "stream" capability, and use
1934 # uncompressed only if compatible.
1932 # uncompressed only if compatible.
1935
1933
1936 if stream and not heads and remote.capable('stream'):
1934 if stream and not heads and remote.capable('stream'):
1937 return self.stream_in(remote)
1935 return self.stream_in(remote)
1938 return self.pull(remote, heads)
1936 return self.pull(remote, heads)
1939
1937
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    *files* is an iterable of (src, dest) pairs; the pairs are snapshotted
    immediately.  The returned closure holds no reference back to any repo
    object, so storing it on a transaction cannot create a cycle.
    """
    pairs = [tuple(entry) for entry in files]

    def dorenames():
        for src, dest in pairs:
            util.rename(src, dest)

    return dorenames
1947
1945
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    Accepts either a plain filesystem path or a 'file:' URL.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1950
1948
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now