make the journal/undo files from transactions inherit the mode from .hg/store
Alexis S. L. Carvalho - r6065:53ed9b40 default
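The patch below touches two places in localrepo.py: __init__ already derives a file-creation mode from the permission bits of .hg/store (or .hg when there is no store), and this revision keeps that mode around as self._createmode and hands it to transaction.transaction, so the journal and undo files written during a transaction pick up the same permissions as the rest of the store. A minimal standalone sketch of the mode computation (the helper name is hypothetical; the logic mirrors the __init__ hunk below):

import os

def inherited_createmode(storepath, umask):
    # Sketch: use the store directory's permission bits for new files.
    # Returning None means "the default mode produced by the umask is
    # already right, so skip the useless chmod".
    try:
        mode = os.stat(storepath).st_mode
    except OSError:
        return None
    if (0777 & ~umask) == (0777 & mode):
        return None
    return mode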
@@ -1,2098 +1,2100 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from node import *
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context, weakref
 import re, lock, transaction, tempfile, stat, errno, ui
 import os, revlog, time, util, extensions, hook, inspect
 
 class localrepository(repo.repository):
     capabilities = util.set(('lookup', 'changegroupsubset'))
     supported = ('revlogv1', 'store')
 
     def __init__(self, parentui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if parentui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                 # create an invalid changelog
                 self.opener("00changelog.i", "a").write(
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise repo.RepoError(_("repository %s not found") % path)
         elif create:
             raise repo.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             try:
                 requirements = self.opener("requires").read().splitlines()
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = []
             # check them
             for r in requirements:
                 if r not in self.supported:
                     raise repo.RepoError(_("requirement '%s' not supported") % r)
 
         # setup store
         if "store" in requirements:
             self.encodefn = util.encodefilename
             self.decodefn = util.decodefilename
             self.spath = os.path.join(self.path, "store")
         else:
             self.encodefn = lambda x: x
             self.decodefn = lambda x: x
             self.spath = self.path
 
         try:
             # files in .hg/ will be created using this mode
             mode = os.stat(self.spath).st_mode
             # avoid some useless chmods
             if (0777 & ~util._umask) == (0777 & mode):
                 mode = None
         except OSError:
             mode = None
 
+        self._createmode = mode
         self.opener.createmode = mode
         sopener = util.opener(self.spath)
         sopener.createmode = mode
         self.sopener = util.encodedopener(sopener, self.encodefn)
 
         self.ui = ui.ui(parentui=parentui)
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
     def __getattr__(self, name):
         if name == 'changelog':
             self.changelog = changelog.changelog(self.sopener)
             self.sopener.defversion = self.changelog.version
             return self.changelog
         if name == 'manifest':
             self.changelog
             self.manifest = manifest.manifest(self.sopener)
             return self.manifest
         if name == 'dirstate':
             self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
             return self.dirstate
         else:
             raise AttributeError, name
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, name, node, message, local, user, date, parent=None,
              extra={}):
         use_dirstate = parent is None
 
         for c in self.tag_disallowed:
             if c in name:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
 
         def writetag(fp, name, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError, err:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetag(fp, name, None, prevtags)
             self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         if use_dirstate:
             try:
                 fp = self.wfile('.hgtags', 'rb+')
             except IOError, err:
                 fp = self.wfile('.hgtags', 'ab')
             else:
                 prevtags = fp.read()
         else:
             try:
                 prevtags = self.filectx('.hgtags', parent).data()
             except revlog.LookupError:
                 pass
             fp = self.wfile('.hgtags', 'wb')
             if prevtags:
                 fp.write(prevtags)
 
         # committed tags are stored in UTF-8
         writetag(fp, name, util.fromlocal, prevtags)
 
         if use_dirstate and '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])
 
         tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                               extra=extra)
 
         self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, name, node, message, local, user, date):
         '''tag a revision with a symbolic name.
 
         if local is True, the tag is stored in a per-repository file.
         otherwise, it is stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tag in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))
 
 
         self._tag(name, node, message, local, user, date)
 
     def tags(self):
         '''return a mapping of tag to node'''
         if self.tagscache:
             return self.tagscache
 
         globaltags = {}
         tagtypes = {}
 
         def readtags(lines, fn, tagtype):
             filetags = {}
             count = 0
 
             def warn(msg):
                 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
 
             for l in lines:
                 count += 1
                 if not l:
                     continue
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     warn(_("cannot parse entry"))
                     continue
                 node, key = s
                 key = util.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     warn(_("node '%s' is not well formed") % node)
                     continue
                 if bin_n not in self.changelog.nodemap:
                     warn(_("tag '%s' refers to unknown node") % key)
                     continue
 
                 h = []
                 if key in filetags:
                     n, h = filetags[key]
                     h.append(n)
                 filetags[key] = (bin_n, h)
 
             for k, nh in filetags.items():
                 if k not in globaltags:
                     globaltags[k] = nh
                     tagtypes[k] = tagtype
                     continue
 
                 # we prefer the global tag if:
                 #  it supercedes us OR
                 #  mutual supercedes and it has a higher rank
                 # otherwise we win because we're tip-most
                 an, ah = nh
                 bn, bh = globaltags[k]
                 if (bn != an and an in bh and
                     (bn not in ah or len(bh) > len(ah))):
                     an = bn
                 ah.extend([n for n in bh if n not in ah])
                 globaltags[k] = an, ah
                 tagtypes[k] = tagtype
 
         # read the tags file from each head, ending with the tip
         f = None
         for rev, node, fnode in self._hgtagsnodes():
             f = (f and f.filectx(fnode) or
                  self.filectx('.hgtags', fileid=fnode))
             readtags(f.data().splitlines(), f, "global")
 
         try:
             data = util.fromlocal(self.opener("localtags").read())
             # localtags are stored in the local character set
             # while the internal tag table is stored in UTF-8
             readtags(data.splitlines(), "localtags", "local")
         except IOError:
             pass
 
         self.tagscache = {}
         self._tagstypecache = {}
         for k,nh in globaltags.items():
             n = nh[0]
             if n != nullid:
                 self.tagscache[k] = n
             self._tagstypecache[k] = tagtypes[k]
         self.tagscache['tip'] = self.changelog.tip()
 
         return self.tagscache
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         self.tags()
 
         return self._tagstypecache.get(tagname)
 
     def _hgtagsnodes(self):
         heads = self.heads()
         heads.reverse()
         last = {}
         ret = []
         for node in heads:
             c = self.changectx(node)
             rev = c.rev()
             try:
                 fnode = c.filenode('.hgtags')
             except revlog.LookupError:
                 continue
             ret.append((rev, node, fnode))
             if fnode in last:
                 ret[last[fnode]] = None
             last[fnode] = len(ret) - 1
         return [item for item in ret if item]
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         l.sort()
         return [(t, n) for r, t, n in l]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().items():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])
 
     def _branchtags(self):
         partial, last, lrev = self._readbranchcache()
 
         tiprev = self.changelog.count() - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def branchtags(self):
         if self.branchcache is not None:
             return self.branchcache
 
         self.branchcache = {} # avoid recursion in changectx
         partial = self._branchtags()
 
         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.items():
             self.branchcache[util.tolocal(k)] = v
         return self.branchcache
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branch.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if not (lrev < self.changelog.count() and
                     self.changelog.node(lrev) == last): # sanity check
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial[label.strip()] = bin(node)
         except (KeyboardInterrupt, util.SignalInterrupt):
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branch.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, node in branches.iteritems():
                 f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, start, end):
         for r in xrange(start, end):
             c = self.changectx(r)
             b = c.branch()
             partial[b] = c.node()
 
     def lookup(self, key):
         if key == '.':
             key, second = self.dirstate.parents()
             if key == nullid:
                 raise repo.RepoError(_("no revision checked out"))
             if second != nullid:
                 self.ui.warn(_("warning: working directory has two parents, "
                                "tag '.' uses the first\n"))
         elif key == 'null':
             return nullid
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise repo.RepoError(_("unknown revision '%s'") % key)
 
     def dev(self):
         return os.lstat(self.path).st_dev
 
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def sjoin(self, f):
         f = self.encodefn(f)
         return os.path.join(self.spath, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid=None):
         return context.changectx(self, changeid)
 
     def workingctx(self):
         return context.workingctx(self)
 
     def parents(self, changeid=None):
         '''
         get list of changectxs for parents of changeid or working directory
         '''
         if changeid is None:
             pl = self.dirstate.parents()
         else:
             n = self.changelog.lookup(changeid)
             pl = self.changelog.parents(n)
         if pl[1] == nullid:
             return [self.changectx(pl[0])]
         return [self.changectx(pl[0]), self.changectx(pl[1])]
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 mf = util.matcher(self.root, "", [pat], [], [])[1]
                 fn = None
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, cmd))
             self.filterpats[filter] = l
 
         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         self.wopener(filename, 'w').write(data)
         util.set_flags(self.wjoin(filename), flags)
 
     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)
 
     def transaction(self):
         if self._transref and self._transref():
             return self._transref().nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise repo.RepoError(_("journal already exists - run hg recover"))
 
         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())
 
         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
-                                     aftertrans(renames))
+                                     aftertrans(renames),
+                                     self._createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def recover(self):
         l = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"))
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             del l
 
     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"))
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % util.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             del lock, wlock
 
     def invalidate(self):
         for a in "changelog manifest".split():
             if hasattr(self, a):
                 self.__delattr__(a)
         self.tagscache = None
         self._tagstypecache = None
         self.nodetagscache = None
 
     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except lock.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l
 
     def lock(self, wait=True):
         if self._lockref and self._lockref():
             return self._lockref()
 
         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l
 
     def wlock(self, wait=True):
         if self._wlockref and self._wlockref():
             return self._wlockref()
 
         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l
 
     def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """
 
         t = self.wread(fn)
         fl = self.file(fn)
         fp1 = manifest1.get(fn, nullid)
         fp2 = manifest2.get(fn, nullid)
 
         meta = {}
         cp = self.dirstate.copied(fn)
         if cp:
             # Mark the new revision of this file as a copy of another
             # file.  This copy data will effectively act as a parent
             # of this new revision.  If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent.  For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4        as the merge base
             #
             meta["copy"] = cp
             if not manifest2: # not a branch merge
                 meta["copyrev"] = hex(manifest1.get(cp, nullid))
                 fp2 = nullid
             elif fp2 != nullid: # copied on remote side
                 meta["copyrev"] = hex(manifest1.get(cp, nullid))
             elif fp1 != nullid: # copied on local side, reversed
                 meta["copyrev"] = hex(manifest2.get(cp))
                 fp2 = fp1
             elif cp in manifest2: # directory rename on local side
                 meta["copyrev"] = hex(manifest2[cp])
             else: # directory rename on remote side
                 meta["copyrev"] = hex(manifest1.get(cp, nullid))
             self.ui.debug(_(" %s: copy %s:%s\n") %
                           (fn, cp, meta["copyrev"]))
             fp1 = nullid
         elif fp2 != nullid:
             # is one parent an ancestor of the other?
             fpa = fl.ancestor(fp1, fp2)
             if fpa == fp1:
                 fp1, fp2 = fp2, nullid
             elif fpa == fp2:
                 fp2 = nullid
 
         # is the file unmodified from the parent? report existing entry
         if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
             return fp1
 
         changelist.append(fn)
         return fl.add(t, meta, tr, linkrev, fp1, fp2)
 
     def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
         if p1 is None:
             p1, p2 = self.dirstate.parents()
         return self.commit(files=files, text=text, user=user, date=date,
                            p1=p1, p2=p2, extra=extra, empty_ok=True)
 
     def commit(self, files=None, text="", user=None, date=None,
                match=util.always, force=False, force_editor=False,
                p1=None, p2=None, extra={}, empty_ok=False):
         wlock = lock = tr = None
         valid = 0 # don't save the dirstate if this isn't set
         if files:
             files = util.unique(files)
         try:
             commit = []
             remove = []
             changed = []
             use_dirstate = (p1 is None) # not rawcommit
             extra = extra.copy()
 
             if use_dirstate:
                 if files:
                     for f in files:
                         s = self.dirstate[f]
                         if s in 'nma':
                             commit.append(f)
                         elif s == 'r':
                             remove.append(f)
                         else:
                             self.ui.warn(_("%s not tracked!\n") % f)
                 else:
                     changes = self.status(match=match)[:5]
                     modified, added, removed, deleted, unknown = changes
                     commit = modified + added
                     remove = removed
             else:
                 commit = files
 
             if use_dirstate:
                 p1, p2 = self.dirstate.parents()
                 update_dirstate = True
             else:
                 p1, p2 = p1, p2 or nullid
                 update_dirstate = (self.dirstate.parents()[0] == p1)
 
             c1 = self.changelog.read(p1)
             c2 = self.changelog.read(p2)
             m1 = self.manifest.read(c1[0]).copy()
             m2 = self.manifest.read(c2[0])
 
             if use_dirstate:
                 branchname = self.workingctx().branch()
                 try:
                     branchname = branchname.decode('UTF-8').encode('UTF-8')
                 except UnicodeDecodeError:
                     raise util.Abort(_('branch name not in UTF-8!'))
             else:
                 branchname = ""
 
             if use_dirstate:
                 oldname = c1[5].get("branch") # stored in UTF-8
                 if (not commit and not remove and not force and p2 == nullid
                     and branchname == oldname):
                     self.ui.status(_("nothing changed\n"))
                     return None
 
             xp1 = hex(p1)
             if p2 == nullid: xp2 = ''
             else: xp2 = hex(p2)
 
             self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
 
             wlock = self.wlock()
             lock = self.lock()
             tr = self.transaction()
             trp = weakref.proxy(tr)
 
             # check in files
             new = {}
             linkrev = self.changelog.count()
             commit.sort()
             is_exec = util.execfunc(self.root, m1.execf)
             is_link = util.linkfunc(self.root, m1.linkf)
             for f in commit:
                 self.ui.note(f + "\n")
                 try:
                     new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                     new_exec = is_exec(f)
                     new_link = is_link(f)
                     if ((not changed or changed[-1] != f) and
                         m2.get(f) != new[f]):
                         # mention the file in the changelog if some
                         # flag changed, even if there was no content
                         # change.
                         old_exec = m1.execf(f)
                         old_link = m1.linkf(f)
                         if old_exec != new_exec or old_link != new_link:
                             changed.append(f)
                     m1.set(f, new_exec, new_link)
                     if use_dirstate:
                         self.dirstate.normal(f)
 
                 except (OSError, IOError):
                     if use_dirstate:
                         self.ui.warn(_("trouble committing %s!\n") % f)
                         raise
                     else:
                         remove.append(f)
 
             # update manifest
             m1.update(new)
             remove.sort()
             removed = []
 
             for f in remove:
                 if f in m1:
                     del m1[f]
                     removed.append(f)
                 elif f in m2:
                     removed.append(f)
             mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                    (new, removed))
 
             # add changeset
             new = new.keys()
             new.sort()
 
             user = user or self.ui.username()
             if (not empty_ok and not text) or force_editor:
                 edittext = []
                 if text:
                     edittext.append(text)
                 edittext.append("")
                 edittext.append(_("HG: Enter commit message."
                                   " Lines beginning with 'HG:' are removed."))
                 edittext.append("HG: --")
                 edittext.append("HG: user: %s" % user)
                 if p2 != nullid:
                     edittext.append("HG: branch merge")
                 if branchname:
                     edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                 edittext.extend(["HG: changed %s" % f for f in changed])
                 edittext.extend(["HG: removed %s" % f for f in removed])
                 if not changed and not remove:
                     edittext.append("HG: no files changed")
                 edittext.append("")
                 # run editor in the repository root
                 olddir = os.getcwd()
                 os.chdir(self.root)
                 text = self.ui.edit("\n".join(edittext), user)
                 os.chdir(olddir)
 
             if branchname:
                 extra["branch"] = branchname
 
             if use_dirstate:
                 lines = [line.rstrip() for line in text.rstrip().splitlines()]
                 while lines and not lines[0]:
                     del lines[0]
                 if not lines:
                     raise util.Abort(_("empty commit message"))
                 text = '\n'.join(lines)
 
             n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                    user, date, extra)
             self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                       parent2=xp2)
             tr.close()
 
             if self.branchcache and "branch" in extra:
                 self.branchcache[util.tolocal(extra["branch"])] = n
 
             if use_dirstate or update_dirstate:
                 self.dirstate.setparents(n)
                 if use_dirstate:
                     for f in removed:
                         self.dirstate.forget(f)
             valid = 1 # our dirstate updates are complete
 
             self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
             return n
         finally:
             if not valid: # don't save our updated dirstate
                 self.dirstate.invalidate()
             del tr, lock, wlock
 
898 def walk(self, node=None, files=[], match=util.always, badmatch=None):
900 def walk(self, node=None, files=[], match=util.always, badmatch=None):
899 '''
901 '''
900 walk recursively through the directory tree or a given
902 walk recursively through the directory tree or a given
901 changeset, finding all files matched by the match
903 changeset, finding all files matched by the match
902 function
904 function
903
905
904 results are yielded in a tuple (src, filename), where src
906 results are yielded in a tuple (src, filename), where src
905 is one of:
907 is one of:
906 'f' the file was found in the directory tree
908 'f' the file was found in the directory tree
907 'm' the file was only in the dirstate and not in the tree
909 'm' the file was only in the dirstate and not in the tree
908 'b' file was not found and matched badmatch
910 'b' file was not found and matched badmatch
909 '''
911 '''
910
912
911 if node:
913 if node:
912 fdict = dict.fromkeys(files)
914 fdict = dict.fromkeys(files)
913 # for dirstate.walk, files=['.'] means "walk the whole tree".
915 # for dirstate.walk, files=['.'] means "walk the whole tree".
914 # follow that here, too
916 # follow that here, too
915 fdict.pop('.', None)
917 fdict.pop('.', None)
916 mdict = self.manifest.read(self.changelog.read(node)[0])
918 mdict = self.manifest.read(self.changelog.read(node)[0])
917 mfiles = mdict.keys()
919 mfiles = mdict.keys()
918 mfiles.sort()
920 mfiles.sort()
919 for fn in mfiles:
921 for fn in mfiles:
920 for ffn in fdict:
922 for ffn in fdict:
921 # match if the file is the exact name or a directory
923 # match if the file is the exact name or a directory
922 if ffn == fn or fn.startswith("%s/" % ffn):
924 if ffn == fn or fn.startswith("%s/" % ffn):
923 del fdict[ffn]
925 del fdict[ffn]
924 break
926 break
925 if match(fn):
927 if match(fn):
926 yield 'm', fn
928 yield 'm', fn
927 ffiles = fdict.keys()
929 ffiles = fdict.keys()
928 ffiles.sort()
930 ffiles.sort()
929 for fn in ffiles:
931 for fn in ffiles:
930 if badmatch and badmatch(fn):
932 if badmatch and badmatch(fn):
931 if match(fn):
933 if match(fn):
932 yield 'b', fn
934 yield 'b', fn
933 else:
935 else:
934 self.ui.warn(_('%s: No such file in rev %s\n')
936 self.ui.warn(_('%s: No such file in rev %s\n')
935 % (self.pathto(fn), short(node)))
937 % (self.pathto(fn), short(node)))
936 else:
938 else:
937 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
939 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
938 yield src, fn
940 yield src, fn
939
941
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

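    # Illustrative usage sketch (not part of the original module): how a
    # caller might consume the seven-tuple returned above.  Assumes `repo`
    # is a localrepository opened elsewhere, e.g. hg.repository(ui.ui(), '.').
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_clean=True)
    #   for prefix, names in zip('MARD?', (modified, added, removed,
    #                                      deleted, unknown)):
    #       for f in names:
    #           print '%s %s' % (prefix, f)
    #
    # The seven lists come back sorted; ignored and clean stay empty unless
    # list_ignored/list_clean are requested.
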
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

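    # Illustrative sketch (not in the original source): add() returns the
    # names it rejected, so a hypothetical caller can report what failed.
    #
    #   rejected = repo.add(['README', 'src/new.py'])
    #   if rejected:
    #       print 'not added: %s' % ', '.join(rejected)
    #
    # Note that the 10MB warning above is advisory only; such files are
    # still added.
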
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

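    # Illustrative sketch (not in the original source): because of the
    # descending-rev sort above, the first element is the highest-revision
    # head, i.e. the repository tip.
    #
    #   for h in repo.heads():
    #       print repo.changelog.rev(h), short(h)
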
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head.  So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev.  There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

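    # Illustrative sketch (not in the original source), following the
    # algorithm described above.  The branch name is whatever appears in
    # branchtags(), e.g. 'default':
    #
    #   for h in repo.branchheads('default'):
    #       print short(h)
    #
    # Passing start restricts the result to heads reachable from that node.
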
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

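    # The sampling above is exponential: f doubles each time a node is kept,
    # so a node is recorded when its distance i from top is exactly 1, 2, 4,
    # 8, ... - roughly log2 of the range length nodes in total.  A
    # hypothetical illustration (not in the original source):
    #
    #   # for a 20-revision linear range between top and bottom,
    #   # between([(top, bottom)])[0] holds the nodes at offsets
    #   # 1, 2, 4, 8 and 16 below top
    #   l = repo.between([(top, bottom)])[0]
    #
    # This is what makes the binary search in findincoming cheap: each round
    # narrows an unknown branch range to a fraction of its size.
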
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but where no children exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

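    # Illustrative sketch (not in the original source): the common pattern
    # is to pass a shared `base` dict so that a later findoutgoing call can
    # reuse the discovery work, as prepush does below.
    #
    #   base = {}
    #   fetch = repo.findincoming(remote, base=base)
    #   # fetch: roots of the missing subsets; base: known-common nodes
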
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

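    # Illustrative sketch (not in the original source): with heads given,
    # findoutgoing also reports which remote heads gain children, which is
    # what prepush uses for its new-remote-head check.
    #
    #   update, updated_heads = repo.findoutgoing(remote, base,
    #                                             remote.heads())
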
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

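    # Illustrative sketch (not in the original source): a minimal pull from
    # a hypothetical URL; heads=None means "everything the remote has".
    #
    #   from mercurial import hg
    #   remote = hg.repository(repo.ui, 'http://example.com/hg/project')
    #   repo.pull(remote)            # returns addchangegroup's result
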
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

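    # The two (None, 1) returns above mean "nothing to push" and "push
    # refused"; callers treat ret[1] as the command's return code.  A
    # hypothetical caller (not in the original source) would unpack the
    # result the way the two push_* methods below do:
    #
    #   ret = repo.prepush(remote, force=False, revs=None)
    #   if ret[0] is None:
    #       result = ret[1]          # 0/1 exit status, no bundle to send
    #   else:
    #       cg, remote_heads = ret   # changegroup stream + remote heads
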
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1548 def changegroupsubset(self, bases, heads, source, extranodes=None):
1550 def changegroupsubset(self, bases, heads, source, extranodes=None):
1549 """This function generates a changegroup consisting of all the nodes
1551 """This function generates a changegroup consisting of all the nodes
1550 that are descendents of any of the bases, and ancestors of any of
1552 that are descendents of any of the bases, and ancestors of any of
1551 the heads.
1553 the heads.
1552
1554
1553 It is fairly complex as determining which filenodes and which
1555 It is fairly complex as determining which filenodes and which
1554 manifest nodes need to be included for the changeset to be complete
1556 manifest nodes need to be included for the changeset to be complete
1555 is non-trivial.
1557 is non-trivial.
1556
1558
1557 Another wrinkle is doing the reverse, figuring out which changeset in
1559 Another wrinkle is doing the reverse, figuring out which changeset in
1558 the changegroup a particular filenode or manifestnode belongs to.
1560 the changegroup a particular filenode or manifestnode belongs to.
1559
1561
1560 The caller can specify some nodes that must be included in the
1562 The caller can specify some nodes that must be included in the
1561 changegroup using the extranodes argument. It should be a dict
1563 changegroup using the extranodes argument. It should be a dict
1562 where the keys are the filenames (or 1 for the manifest), and the
1564 where the keys are the filenames (or 1 for the manifest), and the
1563 values are lists of (node, linknode) tuples, where node is a wanted
1565 values are lists of (node, linknode) tuples, where node is a wanted
1564 node and linknode is the changelog node that should be transmitted as
1566 node and linknode is the changelog node that should be transmitted as
1565 the linkrev.
1567 the linkrev.
1566 """
1568 """
1567
1569
1568 self.hook('preoutgoing', throw=True, source=source)
1570 self.hook('preoutgoing', throw=True, source=source)
1569
1571
1570 # Set up some initial variables
1572 # Set up some initial variables
1571 # Make it easy to refer to self.changelog
1573 # Make it easy to refer to self.changelog
1572 cl = self.changelog
1574 cl = self.changelog
1573 # msng is short for missing - compute the list of changesets in this
1575 # msng is short for missing - compute the list of changesets in this
1574 # changegroup.
1576 # changegroup.
1575 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1577 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1576 self.changegroupinfo(msng_cl_lst, source)
1578 self.changegroupinfo(msng_cl_lst, source)
1577 # Some bases may turn out to be superfluous, and some heads may be
1579 # Some bases may turn out to be superfluous, and some heads may be
1578 # too. nodesbetween will return the minimal set of bases and heads
1580 # too. nodesbetween will return the minimal set of bases and heads
1579 # necessary to re-create the changegroup.
1581 # necessary to re-create the changegroup.
1580
1582
1581 # Known heads are the list of heads that it is assumed the recipient
1583 # Known heads are the list of heads that it is assumed the recipient
1582 # of this changegroup will know about.
1584 # of this changegroup will know about.
1583 knownheads = {}
1585 knownheads = {}
1584 # We assume that all parents of bases are known heads.
1586 # We assume that all parents of bases are known heads.
1585 for n in bases:
1587 for n in bases:
1586 for p in cl.parents(n):
1588 for p in cl.parents(n):
1587 if p != nullid:
1589 if p != nullid:
1588 knownheads[p] = 1
1590 knownheads[p] = 1
1589 knownheads = knownheads.keys()
1591 knownheads = knownheads.keys()
1590 if knownheads:
1592 if knownheads:
1591 # Now that we know what heads are known, we can compute which
1593 # Now that we know what heads are known, we can compute which
1592 # changesets are known. The recipient must know about all
1594 # changesets are known. The recipient must know about all
1593 # changesets required to reach the known heads from the null
1595 # changesets required to reach the known heads from the null
1594 # changeset.
1596 # changeset.
1595 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1597 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1596 junk = None
1598 junk = None
1597 # Transform the list into an ersatz set.
1599 # Transform the list into an ersatz set.
1598 has_cl_set = dict.fromkeys(has_cl_set)
1600 has_cl_set = dict.fromkeys(has_cl_set)
1599 else:
1601 else:
1600 # If there were no known heads, the recipient cannot be assumed to
1602 # If there were no known heads, the recipient cannot be assumed to
1601 # know about any changesets.
1603 # know about any changesets.
1602 has_cl_set = {}
1604 has_cl_set = {}
1603
1605
1604 # Make it easy to refer to self.manifest
1606 # Make it easy to refer to self.manifest
1605 mnfst = self.manifest
1607 mnfst = self.manifest
1606 # We don't know which manifests are missing yet
1608 # We don't know which manifests are missing yet
1607 msng_mnfst_set = {}
1609 msng_mnfst_set = {}
1608 # Nor do we know which filenodes are missing.
1610 # Nor do we know which filenodes are missing.
1609 msng_filenode_set = {}
1611 msng_filenode_set = {}
1610
1612
1611 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1613 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1612 junk = None
1614 junk = None
1613
1615
1614 # A changeset always belongs to itself, so the changenode lookup
1616 # A changeset always belongs to itself, so the changenode lookup
1615 # function for a changenode is identity.
1617 # function for a changenode is identity.
1616 def identity(x):
1618 def identity(x):
1617 return x
1619 return x
1618
1620
1619 # A function generating function. Sets up an environment for the
1621 # A function generating function. Sets up an environment for the
1620 # inner function.
1622 # inner function.
1621 def cmp_by_rev_func(revlog):
1623 def cmp_by_rev_func(revlog):
1622 # Compare two nodes by their revision number in the environment's
1624 # Compare two nodes by their revision number in the environment's
1623 # revision history. Since the revision number both represents the
1625 # revision history. Since the revision number both represents the
1624 # most efficient order to read the nodes in, and represents a
1626 # most efficient order to read the nodes in, and represents a
1625 # topological sorting of the nodes, this function is often useful.
1627 # topological sorting of the nodes, this function is often useful.
1626 def cmp_by_rev(a, b):
1628 def cmp_by_rev(a, b):
1627 return cmp(revlog.rev(a), revlog.rev(b))
1629 return cmp(revlog.rev(a), revlog.rev(b))
1628 return cmp_by_rev
1630 return cmp_by_rev
1629
1631
1630 # If we determine that a particular file or manifest node must be a
1632 # If we determine that a particular file or manifest node must be a
1631 # node that the recipient of the changegroup will already have, we can
1633 # node that the recipient of the changegroup will already have, we can
1632 # also assume the recipient will have all the parents. This function
1634 # also assume the recipient will have all the parents. This function
1633 # prunes them from the set of missing nodes.
1635 # prunes them from the set of missing nodes.
1634 def prune_parents(revlog, hasset, msngset):
1636 def prune_parents(revlog, hasset, msngset):
1635 haslst = hasset.keys()
1637 haslst = hasset.keys()
1636 haslst.sort(cmp_by_rev_func(revlog))
1638 haslst.sort(cmp_by_rev_func(revlog))
1637 for node in haslst:
1639 for node in haslst:
1638 parentlst = [p for p in revlog.parents(node) if p != nullid]
1640 parentlst = [p for p in revlog.parents(node) if p != nullid]
1639 while parentlst:
1641 while parentlst:
1640 n = parentlst.pop()
1642 n = parentlst.pop()
1641 if n not in hasset:
1643 if n not in hasset:
1642 hasset[n] = 1
1644 hasset[n] = 1
1643 p = [p for p in revlog.parents(n) if p != nullid]
1645 p = [p for p in revlog.parents(n) if p != nullid]
1644 parentlst.extend(p)
1646 parentlst.extend(p)
1645 for n in hasset:
1647 for n in hasset:
1646 msngset.pop(n, None)
1648 msngset.pop(n, None)
1647
1649
1648 # This is a function generating function used to set up an environment
1650 # This is a function generating function used to set up an environment
1649 # for the inner function to execute in.
1651 # for the inner function to execute in.
1650 def manifest_and_file_collector(changedfileset):
1652 def manifest_and_file_collector(changedfileset):
1651 # This is an information gathering function that gathers
1653 # This is an information gathering function that gathers
1652 # information from each changeset node that goes out as part of
1654 # information from each changeset node that goes out as part of
1653 # the changegroup. The information gathered is a list of which
1655 # the changegroup. The information gathered is a list of which
1654 # manifest nodes are potentially required (the recipient may
1656 # manifest nodes are potentially required (the recipient may
1655 # already have them) and total list of all files which were
1657 # already have them) and total list of all files which were
1656 # changed in any changeset in the changegroup.
1658 # changed in any changeset in the changegroup.
1657 #
1659 #
1658 # We also remember the first changenode we saw any manifest
1660 # We also remember the first changenode we saw any manifest
1659 # referenced by so we can later determine which changenode 'owns'
1661 # referenced by so we can later determine which changenode 'owns'
1660 # the manifest.
1662 # the manifest.
1661 def collect_manifests_and_files(clnode):
1663 def collect_manifests_and_files(clnode):
1662 c = cl.read(clnode)
1664 c = cl.read(clnode)
1663 for f in c[3]:
1665 for f in c[3]:
1664 # This is to make sure we only have one instance of each
1666 # This is to make sure we only have one instance of each
1665 # filename string for each filename.
1667 # filename string for each filename.
1666 changedfileset.setdefault(f, f)
1668 changedfileset.setdefault(f, f)
1667 msng_mnfst_set.setdefault(c[0], clnode)
1669 msng_mnfst_set.setdefault(c[0], clnode)
1668 return collect_manifests_and_files
1670 return collect_manifests_and_files
1669
1671
1670 # Figure out which manifest nodes (of the ones we think might be part
1672 # Figure out which manifest nodes (of the ones we think might be part
1671 # of the changegroup) the recipient must know about and remove them
1673 # of the changegroup) the recipient must know about and remove them
1672 # from the changegroup.
1674 # from the changegroup.
1673 def prune_manifests():
1675 def prune_manifests():
1674 has_mnfst_set = {}
1676 has_mnfst_set = {}
1675 for n in msng_mnfst_set:
1677 for n in msng_mnfst_set:
1676 # If a 'missing' manifest thinks it belongs to a changenode
1678 # If a 'missing' manifest thinks it belongs to a changenode
1677 # the recipient is assumed to have, obviously the recipient
1679 # the recipient is assumed to have, obviously the recipient
1678 # must have that manifest.
1680 # must have that manifest.
1679 linknode = cl.node(mnfst.linkrev(n))
1681 linknode = cl.node(mnfst.linkrev(n))
1680 if linknode in has_cl_set:
1682 if linknode in has_cl_set:
1681 has_mnfst_set[n] = 1
1683 has_mnfst_set[n] = 1
1682 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1684 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1683
1685
1684 # Use the information collected in collect_manifests_and_files to say
1686 # Use the information collected in collect_manifests_and_files to say
1685 # which changenode any manifestnode belongs to.
1687 # which changenode any manifestnode belongs to.
1686 def lookup_manifest_link(mnfstnode):
1688 def lookup_manifest_link(mnfstnode):
1687 return msng_mnfst_set[mnfstnode]
1689 return msng_mnfst_set[mnfstnode]
1688
1690
1689 # A function generating function that sets up the initial environment
1691 # A function generating function that sets up the initial environment
1690 # the inner function.
1692 # the inner function.
1691 def filenode_collector(changedfiles):
1693 def filenode_collector(changedfiles):
1692 next_rev = [0]
1694 next_rev = [0]
1693 # This gathers information from each manifestnode included in the
1695 # This gathers information from each manifestnode included in the
1694 # changegroup about which filenodes the manifest node references
1696 # changegroup about which filenodes the manifest node references
1695 # so we can include those in the changegroup too.
1697 # so we can include those in the changegroup too.
1696 #
1698 #
1697 # It also remembers which changenode each filenode belongs to. It
1699 # It also remembers which changenode each filenode belongs to. It
1698 # does this by assuming the a filenode belongs to the changenode
1700 # does this by assuming the a filenode belongs to the changenode
1699 # the first manifest that references it belongs to.
1701 # the first manifest that references it belongs to.
1700 def collect_msng_filenodes(mnfstnode):
1702 def collect_msng_filenodes(mnfstnode):
1701 r = mnfst.rev(mnfstnode)
1703 r = mnfst.rev(mnfstnode)
1702 if r == next_rev[0]:
1704 if r == next_rev[0]:
1703 # If the last rev we looked at was the one just previous,
1705 # If the last rev we looked at was the one just previous,
1704 # we only need to see a diff.
1706 # we only need to see a diff.
1705 deltamf = mnfst.readdelta(mnfstnode)
1707 deltamf = mnfst.readdelta(mnfstnode)
1706 # For each line in the delta
1708 # For each line in the delta
1707 for f, fnode in deltamf.items():
1709 for f, fnode in deltamf.items():
1708 f = changedfiles.get(f, None)
1710 f = changedfiles.get(f, None)
1709 # And if the file is in the list of files we care
1711 # And if the file is in the list of files we care
1710 # about.
1712 # about.
1711 if f is not None:
1713 if f is not None:
1712 # Get the changenode this manifest belongs to
1714 # Get the changenode this manifest belongs to
1713 clnode = msng_mnfst_set[mnfstnode]
1715 clnode = msng_mnfst_set[mnfstnode]
1714 # Create the set of filenodes for the file if
1716 # Create the set of filenodes for the file if
1715 # there isn't one already.
1717 # there isn't one already.
1716 ndset = msng_filenode_set.setdefault(f, {})
1718 ndset = msng_filenode_set.setdefault(f, {})
1717 # And set the filenode's changelog node to the
1719 # And set the filenode's changelog node to the
1718 # manifest's if it hasn't been set already.
1720 # manifest's if it hasn't been set already.
1719 ndset.setdefault(fnode, clnode)
1721 ndset.setdefault(fnode, clnode)
1720 else:
1722 else:
1721 # Otherwise we need a full manifest.
1723 # Otherwise we need a full manifest.
1722 m = mnfst.read(mnfstnode)
1724 m = mnfst.read(mnfstnode)
1723 # For every file in we care about.
1725 # For every file in we care about.
1724 for f in changedfiles:
1726 for f in changedfiles:
1725 fnode = m.get(f, None)
1727 fnode = m.get(f, None)
1726 # If it's in the manifest
1728 # If it's in the manifest
1727 if fnode is not None:
1729 if fnode is not None:
1728 # See comments above.
1730 # See comments above.
1729 clnode = msng_mnfst_set[mnfstnode]
1731 clnode = msng_mnfst_set[mnfstnode]
1730 ndset = msng_filenode_set.setdefault(f, {})
1732 ndset = msng_filenode_set.setdefault(f, {})
1731 ndset.setdefault(fnode, clnode)
1733 ndset.setdefault(fnode, clnode)
1732 # Remember the revision we hope to see next.
1734 # Remember the revision we hope to see next.
1733 next_rev[0] = r + 1
1735 next_rev[0] = r + 1
1734 return collect_msng_filenodes
1736 return collect_msng_filenodes
1735
1737
1736 # We have a list of filenodes we think we need for a file, lets remove
1738 # We have a list of filenodes we think we need for a file, lets remove
1737 # all those we now the recipient must have.
1739 # all those we now the recipient must have.
1738 def prune_filenodes(f, filerevlog):
1740 def prune_filenodes(f, filerevlog):
1739 msngset = msng_filenode_set[f]
1741 msngset = msng_filenode_set[f]
1740 hasset = {}
1742 hasset = {}
1741 # If a 'missing' filenode thinks it belongs to a changenode we
1743 # If a 'missing' filenode thinks it belongs to a changenode we
1742 # assume the recipient must have, then the recipient must have
1744 # assume the recipient must have, then the recipient must have
1743 # that filenode.
1745 # that filenode.
1744 for n in msngset:
1746 for n in msngset:
1745 clnode = cl.node(filerevlog.linkrev(n))
1747 clnode = cl.node(filerevlog.linkrev(n))
1746 if clnode in has_cl_set:
1748 if clnode in has_cl_set:
1747 hasset[n] = 1
1749 hasset[n] = 1
1748 prune_parents(filerevlog, hasset, msngset)
1750 prune_parents(filerevlog, hasset, msngset)
1749
1751
1750 # A function generator function that sets up the a context for the
1752 # A function generator function that sets up the a context for the
1751 # inner function.
1753 # inner function.
1752 def lookup_filenode_link_func(fname):
1754 def lookup_filenode_link_func(fname):
1753 msngset = msng_filenode_set[fname]
1755 msngset = msng_filenode_set[fname]
1754 # Lookup the changenode the filenode belongs to.
1756 # Lookup the changenode the filenode belongs to.
1755 def lookup_filenode_link(fnode):
1757 def lookup_filenode_link(fnode):
1756 return msngset[fnode]
1758 return msngset[fnode]
1757 return lookup_filenode_link
1759 return lookup_filenode_link
1758
1760
1759 # Add the nodes that were explicitly requested.
1761 # Add the nodes that were explicitly requested.
1760 def add_extra_nodes(name, nodes):
1762 def add_extra_nodes(name, nodes):
1761 if not extranodes or name not in extranodes:
1763 if not extranodes or name not in extranodes:
1762 return
1764 return
1763
1765
1764 for node, linknode in extranodes[name]:
1766 for node, linknode in extranodes[name]:
1765 if node not in nodes:
1767 if node not in nodes:
1766 nodes[node] = linknode
1768 nodes[node] = linknode
1767
1769
1768 # Now that we have all theses utility functions to help out and
1770 # Now that we have all theses utility functions to help out and
1769 # logically divide up the task, generate the group.
1771 # logically divide up the task, generate the group.
1770 def gengroup():
1772 def gengroup():
1771 # The set of changed files starts empty.
1773 # The set of changed files starts empty.
1772 changedfiles = {}
1774 changedfiles = {}
1773 # Create a changenode group generator that will call our functions
1775 # Create a changenode group generator that will call our functions
1774 # back to lookup the owning changenode and collect information.
1776 # back to lookup the owning changenode and collect information.
1775 group = cl.group(msng_cl_lst, identity,
1777 group = cl.group(msng_cl_lst, identity,
1776 manifest_and_file_collector(changedfiles))
1778 manifest_and_file_collector(changedfiles))
1777 for chnk in group:
1779 for chnk in group:
1778 yield chnk
1780 yield chnk
1779
1781
1780 # The list of manifests has been collected by the generator
1782 # The list of manifests has been collected by the generator
1781 # calling our functions back.
1783 # calling our functions back.
1782 prune_manifests()
1784 prune_manifests()
1783 add_extra_nodes(1, msng_mnfst_set)
1785 add_extra_nodes(1, msng_mnfst_set)
1784 msng_mnfst_lst = msng_mnfst_set.keys()
1786 msng_mnfst_lst = msng_mnfst_set.keys()
1785 # Sort the manifestnodes by revision number.
1787 # Sort the manifestnodes by revision number.
1786 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1788 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1787 # Create a generator for the manifestnodes that calls our lookup
1789 # Create a generator for the manifestnodes that calls our lookup
1788 # and data collection functions back.
1790 # and data collection functions back.
1789 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1791 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1790 filenode_collector(changedfiles))
1792 filenode_collector(changedfiles))
1791 for chnk in group:
1793 for chnk in group:
1792 yield chnk
1794 yield chnk
1793
1795
1794 # These are no longer needed, dereference and toss the memory for
1796 # These are no longer needed, dereference and toss the memory for
1795 # them.
1797 # them.
1796 msng_mnfst_lst = None
1798 msng_mnfst_lst = None
1797 msng_mnfst_set.clear()
1799 msng_mnfst_set.clear()
1798
1800
1799 if extranodes:
1801 if extranodes:
1800 for fname in extranodes:
1802 for fname in extranodes:
1801 if isinstance(fname, int):
1803 if isinstance(fname, int):
1802 continue
1804 continue
1803 add_extra_nodes(fname,
1805 add_extra_nodes(fname,
1804 msng_filenode_set.setdefault(fname, {}))
1806 msng_filenode_set.setdefault(fname, {}))
1805 changedfiles[fname] = 1
1807 changedfiles[fname] = 1
1806 changedfiles = changedfiles.keys()
1808 changedfiles = changedfiles.keys()
1807 changedfiles.sort()
1809 changedfiles.sort()
1808 # Go through all our files in order sorted by name.
1810 # Go through all our files in order sorted by name.
1809 for fname in changedfiles:
1811 for fname in changedfiles:
1810 filerevlog = self.file(fname)
1812 filerevlog = self.file(fname)
1811 if filerevlog.count() == 0:
1813 if filerevlog.count() == 0:
1812 raise util.Abort(_("empty or missing revlog for %s") % fname)
1814 raise util.Abort(_("empty or missing revlog for %s") % fname)
1813 # Toss out the filenodes that the recipient isn't really
1815 # Toss out the filenodes that the recipient isn't really
1814 # missing.
1816 # missing.
1815 if fname in msng_filenode_set:
1817 if fname in msng_filenode_set:
1816 prune_filenodes(fname, filerevlog)
1818 prune_filenodes(fname, filerevlog)
1817 msng_filenode_lst = msng_filenode_set[fname].keys()
1819 msng_filenode_lst = msng_filenode_set[fname].keys()
1818 else:
1820 else:
1819 msng_filenode_lst = []
1821 msng_filenode_lst = []
1820 # If any filenodes are left, generate the group for them,
1822 # If any filenodes are left, generate the group for them,
1821 # otherwise don't bother.
1823 # otherwise don't bother.
1822 if len(msng_filenode_lst) > 0:
1824 if len(msng_filenode_lst) > 0:
1823 yield changegroup.chunkheader(len(fname))
1825 yield changegroup.chunkheader(len(fname))
1824 yield fname
1826 yield fname
1825 # Sort the filenodes by their revision #
1827 # Sort the filenodes by their revision #
1826 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1828 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1827 # Create a group generator and only pass in a changenode
1829 # Create a group generator and only pass in a changenode
1828 # lookup function as we need to collect no information
1830 # lookup function as we need to collect no information
1829 # from filenodes.
1831 # from filenodes.
1830 group = filerevlog.group(msng_filenode_lst,
1832 group = filerevlog.group(msng_filenode_lst,
1831 lookup_filenode_link_func(fname))
1833 lookup_filenode_link_func(fname))
1832 for chnk in group:
1834 for chnk in group:
1833 yield chnk
1835 yield chnk
1834 if fname in msng_filenode_set:
1836 if fname in msng_filenode_set:
1835 # Don't need this anymore, toss it to free memory.
1837 # Don't need this anymore, toss it to free memory.
1836 del msng_filenode_set[fname]
1838 del msng_filenode_set[fname]
1837 # Signal that no more groups are left.
1839 # Signal that no more groups are left.
1838 yield changegroup.closechunk()
1840 yield changegroup.closechunk()
1839
1841
1840 if msng_cl_lst:
1842 if msng_cl_lst:
1841 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1843 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1842
1844
1843 return util.chunkbuffer(gengroup())
1845 return util.chunkbuffer(gengroup())
1844
1846
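Both generators in this file emit the same wire framing, produced by changegroup.chunkheader and terminated by changegroup.closechunk. For orientation, a minimal sketch of the consumer side, assuming the v1 framing (a 4-byte big-endian length that counts its own 4 bytes, with a length of 4 or less closing the current group); the function name is illustrative:

    import struct

    def readchunks(fp):
        # yield each chunk payload until the empty "close" chunk
        # (a bare length header of <= 4) ends the group
        while True:
            header = fp.read(4)
            if len(header) < 4:
                break
            l = struct.unpack(">l", header)[0]
            if l <= 4:
                break
            yield fp.read(l - 4)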
1845 def changegroup(self, basenodes, source):
1847 def changegroup(self, basenodes, source):
1846 """Generate a changegroup of all nodes that we have that a recipient
1848 """Generate a changegroup of all nodes that we have that a recipient
1847 doesn't.
1849 doesn't.
1848
1850
1849 This is much easier than the previous function as we can assume that
1851 This is much easier than the previous function as we can assume that
1850 the recipient has any changenode we aren't sending them."""
1852 the recipient has any changenode we aren't sending them."""
1851
1853
1852 self.hook('preoutgoing', throw=True, source=source)
1854 self.hook('preoutgoing', throw=True, source=source)
1853
1855
1854 cl = self.changelog
1856 cl = self.changelog
1855 nodes = cl.nodesbetween(basenodes, None)[0]
1857 nodes = cl.nodesbetween(basenodes, None)[0]
1856 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1858 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1857 self.changegroupinfo(nodes, source)
1859 self.changegroupinfo(nodes, source)
1858
1860
1859 def identity(x):
1861 def identity(x):
1860 return x
1862 return x
1861
1863
1862 def gennodelst(revlog):
1864 def gennodelst(revlog):
1863 for r in xrange(0, revlog.count()):
1865 for r in xrange(0, revlog.count()):
1864 n = revlog.node(r)
1866 n = revlog.node(r)
1865 if revlog.linkrev(n) in revset:
1867 if revlog.linkrev(n) in revset:
1866 yield n
1868 yield n
1867
1869
1868 def changed_file_collector(changedfileset):
1870 def changed_file_collector(changedfileset):
1869 def collect_changed_files(clnode):
1871 def collect_changed_files(clnode):
1870 c = cl.read(clnode)
1872 c = cl.read(clnode)
1871 for fname in c[3]:
1873 for fname in c[3]:
1872 changedfileset[fname] = 1
1874 changedfileset[fname] = 1
1873 return collect_changed_files
1875 return collect_changed_files
1874
1876
1875 def lookuprevlink_func(revlog):
1877 def lookuprevlink_func(revlog):
1876 def lookuprevlink(n):
1878 def lookuprevlink(n):
1877 return cl.node(revlog.linkrev(n))
1879 return cl.node(revlog.linkrev(n))
1878 return lookuprevlink
1880 return lookuprevlink
1879
1881
1880 def gengroup():
1882 def gengroup():
1881 # construct a list of all changed files
1883 # construct a list of all changed files
1882 changedfiles = {}
1884 changedfiles = {}
1883
1885
1884 for chnk in cl.group(nodes, identity,
1886 for chnk in cl.group(nodes, identity,
1885 changed_file_collector(changedfiles)):
1887 changed_file_collector(changedfiles)):
1886 yield chnk
1888 yield chnk
1887 changedfiles = changedfiles.keys()
1889 changedfiles = changedfiles.keys()
1888 changedfiles.sort()
1890 changedfiles.sort()
1889
1891
1890 mnfst = self.manifest
1892 mnfst = self.manifest
1891 nodeiter = gennodelst(mnfst)
1893 nodeiter = gennodelst(mnfst)
1892 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1894 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1893 yield chnk
1895 yield chnk
1894
1896
1895 for fname in changedfiles:
1897 for fname in changedfiles:
1896 filerevlog = self.file(fname)
1898 filerevlog = self.file(fname)
1897 if filerevlog.count() == 0:
1899 if filerevlog.count() == 0:
1898 raise util.Abort(_("empty or missing revlog for %s") % fname)
1900 raise util.Abort(_("empty or missing revlog for %s") % fname)
1899 nodeiter = gennodelst(filerevlog)
1901 nodeiter = gennodelst(filerevlog)
1900 nodeiter = list(nodeiter)
1902 nodeiter = list(nodeiter)
1901 if nodeiter:
1903 if nodeiter:
1902 yield changegroup.chunkheader(len(fname))
1904 yield changegroup.chunkheader(len(fname))
1903 yield fname
1905 yield fname
1904 lookup = lookuprevlink_func(filerevlog)
1906 lookup = lookuprevlink_func(filerevlog)
1905 for chnk in filerevlog.group(nodeiter, lookup):
1907 for chnk in filerevlog.group(nodeiter, lookup):
1906 yield chnk
1908 yield chnk
1907
1909
1908 yield changegroup.closechunk()
1910 yield changegroup.closechunk()
1909
1911
1910 if nodes:
1912 if nodes:
1911 self.hook('outgoing', node=hex(nodes[0]), source=source)
1913 self.hook('outgoing', node=hex(nodes[0]), source=source)
1912
1914
1913 return util.chunkbuffer(gengroup())
1915 return util.chunkbuffer(gengroup())
1914
1916
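The chunkbuffer returned here behaves like a plain file object, which is what lets callers stream it to disk or over the wire. A hedged sketch of dumping a repository into an uncompressed bundle; the 'HG10UN' magic and the nullid base are assumptions about the surrounding 2007-era API, not something this diff shows:

    from mercurial import node

    def writebundle(repo, path):
        # everything the repo has, relative to an empty remote
        cg = repo.changegroup([node.nullid], 'bundle')
        f = open(path, 'wb')
        f.write('HG10UN')          # bundle magic, "UN" = no compression
        while True:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(chunk)
        f.close()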
1915 def addchangegroup(self, source, srctype, url, emptyok=False):
1917 def addchangegroup(self, source, srctype, url, emptyok=False):
1916 """add changegroup to repo.
1918 """add changegroup to repo.
1917
1919
1918 return values:
1920 return values:
1919 - nothing changed or no source: 0
1921 - nothing changed or no source: 0
1920 - more heads than before: 1+added heads (2..n)
1922 - more heads than before: 1+added heads (2..n)
1921 - fewer heads than before: -1-removed heads (-2..-n)
1923 - fewer heads than before: -1-removed heads (-2..-n)
1922 - number of heads stays the same: 1
1924 - number of heads stays the same: 1
1923 """
1925 """
1924 def csmap(x):
1926 def csmap(x):
1925 self.ui.debug(_("add changeset %s\n") % short(x))
1927 self.ui.debug(_("add changeset %s\n") % short(x))
1926 return cl.count()
1928 return cl.count()
1927
1929
1928 def revmap(x):
1930 def revmap(x):
1929 return cl.rev(x)
1931 return cl.rev(x)
1930
1932
1931 if not source:
1933 if not source:
1932 return 0
1934 return 0
1933
1935
1934 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1936 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1935
1937
1936 changesets = files = revisions = 0
1938 changesets = files = revisions = 0
1937
1939
1938 # write changelog data to temp files so concurrent readers will not see
1940 # write changelog data to temp files so concurrent readers will not see
1939 # an inconsistent view
1941 # an inconsistent view
1940 cl = self.changelog
1942 cl = self.changelog
1941 cl.delayupdate()
1943 cl.delayupdate()
1942 oldheads = len(cl.heads())
1944 oldheads = len(cl.heads())
1943
1945
1944 tr = self.transaction()
1946 tr = self.transaction()
1945 try:
1947 try:
1946 trp = weakref.proxy(tr)
1948 trp = weakref.proxy(tr)
1947 # pull off the changeset group
1949 # pull off the changeset group
1948 self.ui.status(_("adding changesets\n"))
1950 self.ui.status(_("adding changesets\n"))
1949 cor = cl.count() - 1
1951 cor = cl.count() - 1
1950 chunkiter = changegroup.chunkiter(source)
1952 chunkiter = changegroup.chunkiter(source)
1951 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1953 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1952 raise util.Abort(_("received changelog group is empty"))
1954 raise util.Abort(_("received changelog group is empty"))
1953 cnr = cl.count() - 1
1955 cnr = cl.count() - 1
1954 changesets = cnr - cor
1956 changesets = cnr - cor
1955
1957
1956 # pull off the manifest group
1958 # pull off the manifest group
1957 self.ui.status(_("adding manifests\n"))
1959 self.ui.status(_("adding manifests\n"))
1958 chunkiter = changegroup.chunkiter(source)
1960 chunkiter = changegroup.chunkiter(source)
1959 # no need to check for empty manifest group here:
1961 # no need to check for empty manifest group here:
1960 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1962 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1961 # no new manifest will be created and the manifest group will
1963 # no new manifest will be created and the manifest group will
1962 # be empty during the pull
1964 # be empty during the pull
1963 self.manifest.addgroup(chunkiter, revmap, trp)
1965 self.manifest.addgroup(chunkiter, revmap, trp)
1964
1966
1965 # process the files
1967 # process the files
1966 self.ui.status(_("adding file changes\n"))
1968 self.ui.status(_("adding file changes\n"))
1967 while 1:
1969 while 1:
1968 f = changegroup.getchunk(source)
1970 f = changegroup.getchunk(source)
1969 if not f:
1971 if not f:
1970 break
1972 break
1971 self.ui.debug(_("adding %s revisions\n") % f)
1973 self.ui.debug(_("adding %s revisions\n") % f)
1972 fl = self.file(f)
1974 fl = self.file(f)
1973 o = fl.count()
1975 o = fl.count()
1974 chunkiter = changegroup.chunkiter(source)
1976 chunkiter = changegroup.chunkiter(source)
1975 if fl.addgroup(chunkiter, revmap, trp) is None:
1977 if fl.addgroup(chunkiter, revmap, trp) is None:
1976 raise util.Abort(_("received file revlog group is empty"))
1978 raise util.Abort(_("received file revlog group is empty"))
1977 revisions += fl.count() - o
1979 revisions += fl.count() - o
1978 files += 1
1980 files += 1
1979
1981
1980 # make changelog see real files again
1982 # make changelog see real files again
1981 cl.finalize(trp)
1983 cl.finalize(trp)
1982
1984
1983 newheads = len(self.changelog.heads())
1985 newheads = len(self.changelog.heads())
1984 heads = ""
1986 heads = ""
1985 if oldheads and newheads != oldheads:
1987 if oldheads and newheads != oldheads:
1986 heads = _(" (%+d heads)") % (newheads - oldheads)
1988 heads = _(" (%+d heads)") % (newheads - oldheads)
1987
1989
1988 self.ui.status(_("added %d changesets"
1990 self.ui.status(_("added %d changesets"
1989 " with %d changes to %d files%s\n")
1991 " with %d changes to %d files%s\n")
1990 % (changesets, revisions, files, heads))
1992 % (changesets, revisions, files, heads))
1991
1993
1992 if changesets > 0:
1994 if changesets > 0:
1993 self.hook('pretxnchangegroup', throw=True,
1995 self.hook('pretxnchangegroup', throw=True,
1994 node=hex(self.changelog.node(cor+1)), source=srctype,
1996 node=hex(self.changelog.node(cor+1)), source=srctype,
1995 url=url)
1997 url=url)
1996
1998
1997 tr.close()
1999 tr.close()
1998 finally:
2000 finally:
1999 del tr
2001 del tr
2000
2002
2001 if changesets > 0:
2003 if changesets > 0:
2002 # forcefully update the on-disk branch cache
2004 # forcefully update the on-disk branch cache
2003 self.ui.debug(_("updating the branch cache\n"))
2005 self.ui.debug(_("updating the branch cache\n"))
2004 self.branchcache = None
2006 self.branchcache = None
2005 self.branchtags()
2007 self.branchtags()
2006 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2008 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2007 source=srctype, url=url)
2009 source=srctype, url=url)
2008
2010
2009 for i in xrange(cor + 1, cnr + 1):
2011 for i in xrange(cor + 1, cnr + 1):
2010 self.hook("incoming", node=hex(self.changelog.node(i)),
2012 self.hook("incoming", node=hex(self.changelog.node(i)),
2011 source=srctype, url=url)
2013 source=srctype, url=url)
2012
2014
2013 # never return 0 here:
2015 # never return 0 here:
2014 if newheads < oldheads:
2016 if newheads < oldheads:
2015 return newheads - oldheads - 1
2017 return newheads - oldheads - 1
2016 else:
2018 else:
2017 return newheads - oldheads + 1
2019 return newheads - oldheads + 1
2018
2020
2019
2021
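Because 0 is reserved for "nothing happened", the head-count delta is shifted away from zero in both directions. A small helper that inverts the encoding documented above (illustrative, not part of the API):

    def headsdelta(ret):
        # inverse of addchangegroup's return-value encoding
        if ret == 0:
            return None        # nothing changed or no source
        if ret > 0:
            return ret - 1     # heads added; 0 means the count is unchanged
        return ret + 1         # heads removed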
2020 def stream_in(self, remote):
2022 def stream_in(self, remote):
2021 fp = remote.stream_out()
2023 fp = remote.stream_out()
2022 l = fp.readline()
2024 l = fp.readline()
2023 try:
2025 try:
2024 resp = int(l)
2026 resp = int(l)
2025 except ValueError:
2027 except ValueError:
2026 raise util.UnexpectedOutput(
2028 raise util.UnexpectedOutput(
2027 _('Unexpected response from remote server:'), l)
2029 _('Unexpected response from remote server:'), l)
2028 if resp == 1:
2030 if resp == 1:
2029 raise util.Abort(_('operation forbidden by server'))
2031 raise util.Abort(_('operation forbidden by server'))
2030 elif resp == 2:
2032 elif resp == 2:
2031 raise util.Abort(_('locking the remote repository failed'))
2033 raise util.Abort(_('locking the remote repository failed'))
2032 elif resp != 0:
2034 elif resp != 0:
2033 raise util.Abort(_('the server sent an unknown error code'))
2035 raise util.Abort(_('the server sent an unknown error code'))
2034 self.ui.status(_('streaming all changes\n'))
2036 self.ui.status(_('streaming all changes\n'))
2035 l = fp.readline()
2037 l = fp.readline()
2036 try:
2038 try:
2037 total_files, total_bytes = map(int, l.split(' ', 1))
2039 total_files, total_bytes = map(int, l.split(' ', 1))
2038 except (ValueError, TypeError):
2040 except (ValueError, TypeError):
2039 raise util.UnexpectedOutput(
2041 raise util.UnexpectedOutput(
2040 _('Unexpected response from remote server:'), l)
2042 _('Unexpected response from remote server:'), l)
2041 self.ui.status(_('%d files to transfer, %s of data\n') %
2043 self.ui.status(_('%d files to transfer, %s of data\n') %
2042 (total_files, util.bytecount(total_bytes)))
2044 (total_files, util.bytecount(total_bytes)))
2043 start = time.time()
2045 start = time.time()
2044 for i in xrange(total_files):
2046 for i in xrange(total_files):
2045 # XXX doesn't support '\n' or '\r' in filenames
2047 # XXX doesn't support '\n' or '\r' in filenames
2046 l = fp.readline()
2048 l = fp.readline()
2047 try:
2049 try:
2048 name, size = l.split('\0', 1)
2050 name, size = l.split('\0', 1)
2049 size = int(size)
2051 size = int(size)
2050 except (ValueError, TypeError):
2052 except (ValueError, TypeError):
2051 raise util.UnexpectedOutput(
2053 raise util.UnexpectedOutput(
2052 _('Unexpected response from remote server:'), l)
2054 _('Unexpected response from remote server:'), l)
2053 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2055 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2054 ofp = self.sopener(name, 'w')
2056 ofp = self.sopener(name, 'w')
2055 for chunk in util.filechunkiter(fp, limit=size):
2057 for chunk in util.filechunkiter(fp, limit=size):
2056 ofp.write(chunk)
2058 ofp.write(chunk)
2057 ofp.close()
2059 ofp.close()
2058 elapsed = time.time() - start
2060 elapsed = time.time() - start
2059 if elapsed <= 0:
2061 if elapsed <= 0:
2060 elapsed = 0.001
2062 elapsed = 0.001
2061 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2063 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2062 (util.bytecount(total_bytes), elapsed,
2064 (util.bytecount(total_bytes), elapsed,
2063 util.bytecount(total_bytes / elapsed)))
2065 util.bytecount(total_bytes / elapsed)))
2064 self.invalidate()
2066 self.invalidate()
2065 return len(self.heads()) + 1
2067 return len(self.heads()) + 1
2066
2068
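For reference, the stream_out format parsed above is line-oriented: a numeric status line, then a 'total_files total_bytes' line, then one 'name\0size' line per file followed by exactly size raw bytes. A standalone reader under those assumptions, with the error handling and progress reporting elided:

    def readstream(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused to stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            # real code reads in bounded chunks (util.filechunkiter);
            # one big read keeps the sketch short
            yield name, fp.read(int(size))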
2067 def clone(self, remote, heads=[], stream=False):
2069 def clone(self, remote, heads=[], stream=False):
2068 '''clone remote repository.
2070 '''clone remote repository.
2069
2071
2070 keyword arguments:
2072 keyword arguments:
2071 heads: list of revs to clone (forces use of pull)
2073 heads: list of revs to clone (forces use of pull)
2072 stream: use streaming clone if possible'''
2074 stream: use streaming clone if possible'''
2073
2075
2074 # now, all clients that can request uncompressed clones can
2076 # now, all clients that can request uncompressed clones can
2075 # read repo formats supported by all servers that can serve
2077 # read repo formats supported by all servers that can serve
2076 # them.
2078 # them.
2077
2079
2078 # if revlog format changes, client will have to check version
2080 # if revlog format changes, client will have to check version
2079 # and format flags on "stream" capability, and use
2081 # and format flags on "stream" capability, and use
2080 # uncompressed only if compatible.
2082 # uncompressed only if compatible.
2081
2083
2082 if stream and not heads and remote.capable('stream'):
2084 if stream and not heads and remote.capable('stream'):
2083 return self.stream_in(remote)
2085 return self.stream_in(remote)
2084 return self.pull(remote, heads)
2086 return self.pull(remote, heads)
2085
2087
2086 # used to avoid circular references so destructors work
2088 # used to avoid circular references so destructors work
2087 def aftertrans(files):
2089 def aftertrans(files):
2088 renamefiles = [tuple(t) for t in files]
2090 renamefiles = [tuple(t) for t in files]
2089 def a():
2091 def a():
2090 for src, dest in renamefiles:
2092 for src, dest in renamefiles:
2091 util.rename(src, dest)
2093 util.rename(src, dest)
2092 return a
2094 return a
2093
2095
2094 def instance(ui, path, create):
2096 def instance(ui, path, create):
2095 return localrepository(ui, util.drop_scheme('file', path), create)
2097 return localrepository(ui, util.drop_scheme('file', path), create)
2096
2098
2097 def islocal(path):
2099 def islocal(path):
2098 return True
2100 return True
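aftertrans exists so that the transaction's after callback holds no reference back to the repository: it captures only (src, dest) path pairs, so the transaction can be garbage-collected and its __del__ can run. A hedged sketch of how a caller is expected to wire it up (the paths and the ui/sopener names are illustrative):

    renames = [('.hg/store/journal', '.hg/store/undo'),
               ('.hg/journal.dirstate', '.hg/undo.dirstate')]
    tr = transaction.transaction(ui.warn, sopener, '.hg/store/journal',
                                 after=aftertrans(renames))
    # ... writes are registered on tr ...
    tr.close()   # runs the closure: journal files become undo files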
@@ -1,102 +1,104
1 # transaction.py - simple journalling scheme for mercurial
1 # transaction.py - simple journalling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms
11 # This software may be used and distributed according to the terms
12 # of the GNU General Public License, incorporated herein by reference.
12 # of the GNU General Public License, incorporated herein by reference.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import os
16
16
17 class transaction(object):
17 class transaction(object):
18 def __init__(self, report, opener, journal, after=None):
18 def __init__(self, report, opener, journal, after=None, createmode=None):
19 self.journal = None
19 self.journal = None
20
20
21 self.count = 1
21 self.count = 1
22 self.report = report
22 self.report = report
23 self.opener = opener
23 self.opener = opener
24 self.after = after
24 self.after = after
25 self.entries = []
25 self.entries = []
26 self.map = {}
26 self.map = {}
27 self.journal = journal
27 self.journal = journal
28
28
29 self.file = open(self.journal, "w")
29 self.file = open(self.journal, "w")
30 if createmode is not None:
31 os.chmod(self.journal, createmode & 0666)
30
32
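This is the heart of the change: the caller passes the mode of .hg/store as createmode, and the journal is chmod'ed to that mode masked with 0666, so a regular file never picks up execute or setgid bits. Since the undo file is produced by renaming the journal (see aftertrans above), it inherits the same mode. A sketch of a hypothetical caller; the stat call is an assumption about how localrepo derives the mode, not part of this hunk:

    import os

    st = os.stat('.hg/store')              # e.g. mode 02770 in the test below
    tr = transaction(ui.warn, opener, '.hg/store/journal',
                     createmode=st.st_mode)
    # inside __init__: 02770 & 0666 == 0660, a group-writable journal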
31 def __del__(self):
33 def __del__(self):
32 if self.journal:
34 if self.journal:
33 if self.entries: self.abort()
35 if self.entries: self.abort()
34 self.file.close()
36 self.file.close()
35 try: os.unlink(self.journal)
37 try: os.unlink(self.journal)
36 except: pass
38 except: pass
37
39
38 def add(self, file, offset, data=None):
40 def add(self, file, offset, data=None):
39 if file in self.map: return
41 if file in self.map: return
40 self.entries.append((file, offset, data))
42 self.entries.append((file, offset, data))
41 self.map[file] = len(self.entries) - 1
43 self.map[file] = len(self.entries) - 1
42 # add enough data to the journal to do the truncate
44 # add enough data to the journal to do the truncate
43 self.file.write("%s\0%d\n" % (file, offset))
45 self.file.write("%s\0%d\n" % (file, offset))
44 self.file.flush()
46 self.file.flush()
45
47
46 def find(self, file):
48 def find(self, file):
47 if file in self.map:
49 if file in self.map:
48 return self.entries[self.map[file]]
50 return self.entries[self.map[file]]
49 return None
51 return None
50
52
51 def replace(self, file, offset, data=None):
53 def replace(self, file, offset, data=None):
52 if file not in self.map:
54 if file not in self.map:
53 raise KeyError(file)
55 raise KeyError(file)
54 index = self.map[file]
56 index = self.map[file]
55 self.entries[index] = (file, offset, data)
57 self.entries[index] = (file, offset, data)
56 self.file.write("%s\0%d\n" % (file, offset))
58 self.file.write("%s\0%d\n" % (file, offset))
57 self.file.flush()
59 self.file.flush()
58
60
59 def nest(self):
61 def nest(self):
60 self.count += 1
62 self.count += 1
61 return self
63 return self
62
64
63 def running(self):
65 def running(self):
64 return self.count > 0
66 return self.count > 0
65
67
66 def close(self):
68 def close(self):
67 self.count -= 1
69 self.count -= 1
68 if self.count != 0:
70 if self.count != 0:
69 return
71 return
70 self.file.close()
72 self.file.close()
71 self.entries = []
73 self.entries = []
72 if self.after:
74 if self.after:
73 self.after()
75 self.after()
74 else:
76 else:
75 os.unlink(self.journal)
77 os.unlink(self.journal)
76 self.journal = None
78 self.journal = None
77
79
78 def abort(self):
80 def abort(self):
79 if not self.entries: return
81 if not self.entries: return
80
82
81 self.report(_("transaction abort!\n"))
83 self.report(_("transaction abort!\n"))
82
84
83 for f, o, ignore in self.entries:
85 for f, o, ignore in self.entries:
84 try:
86 try:
85 self.opener(f, "a").truncate(o)
87 self.opener(f, "a").truncate(o)
86 except:
88 except:
87 self.report(_("failed to truncate %s\n") % f)
89 self.report(_("failed to truncate %s\n") % f)
88
90
89 self.entries = []
91 self.entries = []
90
92
91 self.report(_("rollback completed\n"))
93 self.report(_("rollback completed\n"))
92
94
93 def rollback(opener, file):
95 def rollback(opener, file):
94 files = {}
96 files = {}
95 for l in open(file).readlines():
97 for l in open(file).readlines():
96 f, o = l.split('\0')
98 f, o = l.split('\0')
97 files[f] = o
99 files[f] = o
98 for f in files:
100 for f in files:
99 o = files[f]
101 o = files[f]
100 opener(f, "a").truncate(int(o))
102 opener(f, "a").truncate(int(o))
101 os.unlink(file)
103 os.unlink(file)
102
104
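The journal format written by add and replace above is one 'path\0offset\n' record per touched file, with a later record for the same path superseding the earlier one, which is why rollback can simply replay the file into a dict. A minimal standalone parser (name illustrative):

    def readjournal(fp):
        # last write wins, matching how rollback() replays the records
        offsets = {}
        for line in fp:
            f, o = line.rstrip('\n').split('\0')
            offsets[f] = int(o)
        return offsets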
@@ -1,67 +1,67
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # test that new files created in .hg inherit the permissions from .hg/store
3 # test that new files created in .hg inherit the permissions from .hg/store
4
4
5 "$TESTDIR/hghave" unix-permissions || exit 80
5 "$TESTDIR/hghave" unix-permissions || exit 80
6
6
7 mkdir dir
7 mkdir dir
8 # just in case somebody has a strange $TMPDIR
8 # just in case somebody has a strange $TMPDIR
9 chmod g-s dir
9 chmod g-s dir
10 cd dir
10 cd dir
11
11
12 cat >printmodes.py <<EOF
12 cat >printmodes.py <<EOF
13 import os, sys
13 import os, sys
14
14
15 allnames = []
15 allnames = []
16 isdir = {}
16 isdir = {}
17 for root, dirs, files in os.walk(sys.argv[1]):
17 for root, dirs, files in os.walk(sys.argv[1]):
18 for d in dirs:
18 for d in dirs:
19 name = os.path.join(root, d)
19 name = os.path.join(root, d)
20 isdir[name] = 1
20 isdir[name] = 1
21 allnames.append(name)
21 allnames.append(name)
22 for f in files:
22 for f in files:
23 name = os.path.join(root, f)
23 name = os.path.join(root, f)
24 allnames.append(name)
24 allnames.append(name)
25 allnames.sort()
25 allnames.sort()
26 for name in allnames:
26 for name in allnames:
27 suffix = name in isdir and '/' or ''
27 suffix = name in isdir and '/' or ''
28 print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
28 print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
29 EOF
29 EOF
30
30
31 umask 077
31 umask 077
32
32
33 hg init repo
33 hg init repo
34 cd repo
34 cd repo
35
35
36 chmod 02770 .hg/store
36 chmod 02770 .hg/store
37
37
38 echo '% before commit'
38 echo '% before commit'
39 echo '% store can be written by the group, other files cannot'
39 echo '% store can be written by the group, other files cannot'
40 echo '% store is setgid'
40 echo '% store is setgid'
41 python ../printmodes.py .
41 python ../printmodes.py .
42
42
43 mkdir dir
43 mkdir dir
44 touch foo dir/bar
44 touch foo dir/bar
45 hg ci -qAm 'add files'
45 hg ci -qAm 'add files'
46
46
47 echo
47 echo
48 echo '% after commit'
48 echo '% after commit'
49 echo '% working dir files can only be written by the owner'
49 echo '% working dir files can only be written by the owner'
50 echo '% files created in .hg can be written by the group'
50 echo '% files created in .hg can be written by the group'
51 echo '% (in particular, store/**, dirstate, branch.cache)'
51 echo '% (in particular, store/**, dirstate, branch.cache, undo files)'
52 echo '% new directories are setgid'
52 echo '% new directories are setgid'
53 python ../printmodes.py .
53 python ../printmodes.py .
54
54
55 umask 007
55 umask 007
56 hg init ../push
56 hg init ../push
57 echo
57 echo
58 echo '% before push'
58 echo '% before push'
59 echo '% group can write everything'
59 echo '% group can write everything'
60 python ../printmodes.py ../push
60 python ../printmodes.py ../push
61
61
62 umask 077
62 umask 077
63 hg -q push ../push
63 hg -q push ../push
64 echo
64 echo
65 echo '% after push'
65 echo '% after push'
66 echo '% group can still write everything'
66 echo '% group can still write everything'
67 python ../printmodes.py ../push
67 python ../printmodes.py ../push
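Why the expected output below changes: with the store at 02770, the new chmod in transaction.__init__ gives the journal, and therefore .hg/store/undo after the rename, 02770 & 0666 = 0660, where the 077 umask alone used to leave 0600. In Python 2 octal terms:

    >>> print '%05o' % (02770 & 0666)
    00660
    >>> print '%05o' % (0666 & ~077)   # what umask 077 alone produces
    00600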
@@ -1,54 +1,54
1 % before commit
1 % before commit
2 % store can be written by the group, other files cannot
2 % store can be written by the group, other files cannot
3 % store is setgid
3 % store is setgid
4 00700 ./.hg/
4 00700 ./.hg/
5 00600 ./.hg/00changelog.i
5 00600 ./.hg/00changelog.i
6 00600 ./.hg/requires
6 00600 ./.hg/requires
7 02770 ./.hg/store/
7 02770 ./.hg/store/
8
8
9 % after commit
9 % after commit
10 % working dir files can only be written by the owner
10 % working dir files can only be written by the owner
11 % files created in .hg can be written by the group
11 % files created in .hg can be written by the group
12 % (in particular, store/**, dirstate, branch.cache)
12 % (in particular, store/**, dirstate, branch.cache, undo files)
13 % new directories are setgid
13 % new directories are setgid
14 00700 ./.hg/
14 00700 ./.hg/
15 00600 ./.hg/00changelog.i
15 00600 ./.hg/00changelog.i
16 00660 ./.hg/dirstate
16 00660 ./.hg/dirstate
17 00600 ./.hg/requires
17 00600 ./.hg/requires
18 02770 ./.hg/store/
18 02770 ./.hg/store/
19 00660 ./.hg/store/00changelog.i
19 00660 ./.hg/store/00changelog.i
20 00660 ./.hg/store/00manifest.i
20 00660 ./.hg/store/00manifest.i
21 02770 ./.hg/store/data/
21 02770 ./.hg/store/data/
22 02770 ./.hg/store/data/dir/
22 02770 ./.hg/store/data/dir/
23 00660 ./.hg/store/data/dir/bar.i
23 00660 ./.hg/store/data/dir/bar.i
24 00660 ./.hg/store/data/foo.i
24 00660 ./.hg/store/data/foo.i
25 00600 ./.hg/store/undo
25 00660 ./.hg/store/undo
26 00660 ./.hg/undo.branch
26 00660 ./.hg/undo.branch
27 00660 ./.hg/undo.dirstate
27 00660 ./.hg/undo.dirstate
28 00700 ./dir/
28 00700 ./dir/
29 00600 ./dir/bar
29 00600 ./dir/bar
30 00600 ./foo
30 00600 ./foo
31
31
32 % before push
32 % before push
33 % group can write everything
33 % group can write everything
34 00770 ../push/.hg/
34 00770 ../push/.hg/
35 00660 ../push/.hg/00changelog.i
35 00660 ../push/.hg/00changelog.i
36 00660 ../push/.hg/requires
36 00660 ../push/.hg/requires
37 00770 ../push/.hg/store/
37 00770 ../push/.hg/store/
38
38
39 % after push
39 % after push
40 % group can still write everything
40 % group can still write everything
41 00770 ../push/.hg/
41 00770 ../push/.hg/
42 00660 ../push/.hg/00changelog.i
42 00660 ../push/.hg/00changelog.i
43 00660 ../push/.hg/branch.cache
43 00660 ../push/.hg/branch.cache
44 00660 ../push/.hg/requires
44 00660 ../push/.hg/requires
45 00770 ../push/.hg/store/
45 00770 ../push/.hg/store/
46 00660 ../push/.hg/store/00changelog.i
46 00660 ../push/.hg/store/00changelog.i
47 00660 ../push/.hg/store/00manifest.i
47 00660 ../push/.hg/store/00manifest.i
48 00770 ../push/.hg/store/data/
48 00770 ../push/.hg/store/data/
49 00770 ../push/.hg/store/data/dir/
49 00770 ../push/.hg/store/data/dir/
50 00660 ../push/.hg/store/data/dir/bar.i
50 00660 ../push/.hg/store/data/dir/bar.i
51 00660 ../push/.hg/store/data/foo.i
51 00660 ../push/.hg/store/data/foo.i
52 00600 ../push/.hg/store/undo
52 00660 ../push/.hg/store/undo
53 00660 ../push/.hg/undo.branch
53 00660 ../push/.hg/undo.branch
54 00660 ../push/.hg/undo.dirstate
54 00660 ../push/.hg/undo.dirstate