@@ -1,1095 +1,1102 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from mercurial.node import nullrev
import util, error, osutil, revset, similar, encoding, phases, parsers
import pathutil
import match as matchmod
import os, errno, re, glob, tempfile

if os.name == 'nt':
    import scmwindows as scmplatform
else:
    import scmposix as scmplatform

systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

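# Illustrative usage sketch (editor's addition, not part of the original
# module): a status object behaves like a plain 7-tuple whose fields are also
# reachable by name; the file names below are made up.
#
#   st = status(['changed.txt'], [], [], [], [], [], ['same.txt'])
#   st.modified       -> ['changed.txt']
#   st[6] == st.clean -> True
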
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleaned
                # up.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise util.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

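# Editor's note (not part of the original module): the warn/abort behaviour
# above is driven by the ui.portablefilenames setting, e.g. in an hgrc:
#
#   [ui]
#   portablefilenames = warn    # or: abort, ignore, true, false
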
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base

opener = vfs

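# Illustrative usage sketch (editor's addition, not part of the original
# module); the base directory and file names below are made up.
#
#   v = vfs('/path/to/repo/.hg', audit=True)
#   v.write('last-message.txt', 'commit message\n')  # parent dirs are created
#   data = v.tryread('last-message.txt')             # '' if the file is missing
#   full = v.join('store/data')                      # absolute path under base
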
class auditvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)

class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if path:
            return self.vfs.join(self._filter(path))
        else:
            return self.vfs.join(path)

filteropener = filtervfs

class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

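# Editor's note (not part of the original module); the paths below are made
# up. With HGRCPATH=/etc/mercurial:/home/user/.hgrc, rcpath() returns the
# *.rc files found under /etc/mercurial followed by /home/user/.hgrc. With
# HGRCPATH set but empty, only the current repository's .hg/hgrc is consulted.
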
def revsingle(repo, revspec, default='.'):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    return repo[l.last()]

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo, revset.spanset(repo))

    return l

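# Illustrative usage sketch (editor's addition, not part of the original
# module); revision numbers are made up.
#
#   revrange(repo, ['2:5'])        -> baseset with revs 2, 3, 4, 5
#   revrange(repo, ['tip', '0:2']) -> union of both specs, duplicates dropped
#   revsingle(repo, '.')           -> changectx of the working directory parent
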
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats

def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)

def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(join(abs))
            else:
                status = _('removing %s\n') % m.uipath(join(abs))
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
|
868 | for new, old in renames.iteritems(): | |
862 | wctx.copy(old, new) |
|
869 | wctx.copy(old, new) | |
863 | finally: |
|
870 | finally: | |
864 | wlock.release() |
|
871 | wlock.release() | |
865 |
|
872 | |||
866 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
|
873 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): | |
867 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
874 | """Update the dirstate to reflect the intent of copying src to dst. For | |
868 | different reasons, dst might not end up being marked as copied from src. |
|
875 | different reasons, dst might not end up being marked as copied from src. | |
869 | """ |
|
876 | """ | |
870 | origsrc = repo.dirstate.copied(src) or src |
|
877 | origsrc = repo.dirstate.copied(src) or src | |
871 | if dst == origsrc: # copying back a copy? |
|
878 | if dst == origsrc: # copying back a copy? | |
872 | if repo.dirstate[dst] not in 'mn' and not dryrun: |
|
879 | if repo.dirstate[dst] not in 'mn' and not dryrun: | |
873 | repo.dirstate.normallookup(dst) |
|
880 | repo.dirstate.normallookup(dst) | |
874 | else: |
|
881 | else: | |
875 | if repo.dirstate[origsrc] == 'a' and origsrc == src: |
|
882 | if repo.dirstate[origsrc] == 'a' and origsrc == src: | |
876 | if not ui.quiet: |
|
883 | if not ui.quiet: | |
877 | ui.warn(_("%s has not been committed yet, so no copy " |
|
884 | ui.warn(_("%s has not been committed yet, so no copy " | |
878 | "data will be stored for %s.\n") |
|
885 | "data will be stored for %s.\n") | |
879 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) |
|
886 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) | |
880 | if repo.dirstate[dst] in '?r' and not dryrun: |
|
887 | if repo.dirstate[dst] in '?r' and not dryrun: | |
881 | wctx.add([dst]) |
|
888 | wctx.add([dst]) | |
882 | elif not dryrun: |
|
889 | elif not dryrun: | |
883 | wctx.copy(origsrc, dst) |
|
890 | wctx.copy(origsrc, dst) | |
884 |
|
891 | |||
885 | def readrequires(opener, supported): |
|
892 | def readrequires(opener, supported): | |
886 | '''Reads and parses .hg/requires and checks if all entries found |
|
893 | '''Reads and parses .hg/requires and checks if all entries found | |
887 | are in the list of supported features.''' |
|
894 | are in the list of supported features.''' | |
888 | requirements = set(opener.read("requires").splitlines()) |
|
895 | requirements = set(opener.read("requires").splitlines()) | |
889 | missings = [] |
|
896 | missings = [] | |
890 | for r in requirements: |
|
897 | for r in requirements: | |
891 | if r not in supported: |
|
898 | if r not in supported: | |
892 | if not r or not r[0].isalnum(): |
|
899 | if not r or not r[0].isalnum(): | |
893 | raise error.RequirementError(_(".hg/requires file is corrupt")) |
|
900 | raise error.RequirementError(_(".hg/requires file is corrupt")) | |
894 | missings.append(r) |
|
901 | missings.append(r) | |
895 | missings.sort() |
|
902 | missings.sort() | |
896 | if missings: |
|
903 | if missings: | |
897 | raise error.RequirementError( |
|
904 | raise error.RequirementError( | |
898 | _("repository requires features unknown to this Mercurial: %s") |
|
905 | _("repository requires features unknown to this Mercurial: %s") | |
899 | % " ".join(missings), |
|
906 | % " ".join(missings), | |
900 | hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement" |
|
907 | hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement" | |
901 | " for more information")) |
|
908 | " for more information")) | |
902 | return requirements |
|
909 | return requirements | |
903 |
|
910 | |||
904 | class filecachesubentry(object): |
|
911 | class filecachesubentry(object): | |
905 | def __init__(self, path, stat): |
|
912 | def __init__(self, path, stat): | |
906 | self.path = path |
|
913 | self.path = path | |
907 | self.cachestat = None |
|
914 | self.cachestat = None | |
908 | self._cacheable = None |
|
915 | self._cacheable = None | |
909 |
|
916 | |||
910 | if stat: |
|
917 | if stat: | |
911 | self.cachestat = filecachesubentry.stat(self.path) |
|
918 | self.cachestat = filecachesubentry.stat(self.path) | |
912 |
|
919 | |||
913 | if self.cachestat: |
|
920 | if self.cachestat: | |
914 | self._cacheable = self.cachestat.cacheable() |
|
921 | self._cacheable = self.cachestat.cacheable() | |
915 | else: |
|
922 | else: | |
916 | # None means we don't know yet |
|
923 | # None means we don't know yet | |
917 | self._cacheable = None |
|
924 | self._cacheable = None | |
918 |
|
925 | |||
919 | def refresh(self): |
|
926 | def refresh(self): | |
920 | if self.cacheable(): |
|
927 | if self.cacheable(): | |
921 | self.cachestat = filecachesubentry.stat(self.path) |
|
928 | self.cachestat = filecachesubentry.stat(self.path) | |
922 |
|
929 | |||
923 | def cacheable(self): |
|
930 | def cacheable(self): | |
924 | if self._cacheable is not None: |
|
931 | if self._cacheable is not None: | |
925 | return self._cacheable |
|
932 | return self._cacheable | |
926 |
|
933 | |||
927 | # we don't know yet, assume it is for now |
|
934 | # we don't know yet, assume it is for now | |
928 | return True |
|
935 | return True | |
929 |
|
936 | |||
930 | def changed(self): |
|
937 | def changed(self): | |
931 | # no point in going further if we can't cache it |
|
938 | # no point in going further if we can't cache it | |
932 | if not self.cacheable(): |
|
939 | if not self.cacheable(): | |
933 | return True |
|
940 | return True | |
934 |
|
941 | |||
935 | newstat = filecachesubentry.stat(self.path) |
|
942 | newstat = filecachesubentry.stat(self.path) | |
936 |
|
943 | |||
937 | # we may not know if it's cacheable yet, check again now |
|
944 | # we may not know if it's cacheable yet, check again now | |
938 | if newstat and self._cacheable is None: |
|
945 | if newstat and self._cacheable is None: | |
939 | self._cacheable = newstat.cacheable() |
|
946 | self._cacheable = newstat.cacheable() | |
940 |
|
947 | |||
941 | # check again |
|
948 | # check again | |
942 | if not self._cacheable: |
|
949 | if not self._cacheable: | |
943 | return True |
|
950 | return True | |
944 |
|
951 | |||
945 | if self.cachestat != newstat: |
|
952 | if self.cachestat != newstat: | |
946 | self.cachestat = newstat |
|
953 | self.cachestat = newstat | |
947 | return True |
|
954 | return True | |
948 | else: |
|
955 | else: | |
949 | return False |
|
956 | return False | |
950 |
|
957 | |||
951 | @staticmethod |
|
958 | @staticmethod | |
952 | def stat(path): |
|
959 | def stat(path): | |
953 | try: |
|
960 | try: | |
954 | return util.cachestat(path) |
|
961 | return util.cachestat(path) | |
955 | except OSError, e: |
|
962 | except OSError, e: | |
956 | if e.errno != errno.ENOENT: |
|
963 | if e.errno != errno.ENOENT: | |
957 | raise |
|
964 | raise | |
958 |
|
965 | |||
959 | class filecacheentry(object): |
|
966 | class filecacheentry(object): | |
960 | def __init__(self, paths, stat=True): |
|
967 | def __init__(self, paths, stat=True): | |
961 | self._entries = [] |
|
968 | self._entries = [] | |
962 | for path in paths: |
|
969 | for path in paths: | |
963 | self._entries.append(filecachesubentry(path, stat)) |
|
970 | self._entries.append(filecachesubentry(path, stat)) | |
964 |
|
971 | |||
965 | def changed(self): |
|
972 | def changed(self): | |
966 | '''true if any entry has changed''' |
|
973 | '''true if any entry has changed''' | |
967 | for entry in self._entries: |
|
974 | for entry in self._entries: | |
968 | if entry.changed(): |
|
975 | if entry.changed(): | |
969 | return True |
|
976 | return True | |
970 | return False |
|
977 | return False | |
971 |
|
978 | |||
972 | def refresh(self): |
|
979 | def refresh(self): | |
973 | for entry in self._entries: |
|
980 | for entry in self._entries: | |
974 | entry.refresh() |
|
981 | entry.refresh() | |
975 |
|
982 | |||
976 | class filecache(object): |
|
983 | class filecache(object): | |
977 | '''A property-like decorator that tracks files under .hg/ for updates. |
|
984 | '''A property-like decorator that tracks files under .hg/ for updates. | |
978 |
|
985 | |||
979 | Records stat info when called in _filecache. |
|
986 | Records stat info when called in _filecache. | |
980 |
|
987 | |||
981 | On subsequent calls, compares old stat info with new info, and recreates the |
|
988 | On subsequent calls, compares old stat info with new info, and recreates the | |
982 | object when any of the files changes, updating the new stat info in |
|
989 | object when any of the files changes, updating the new stat info in | |
983 | _filecache. |
|
990 | _filecache. | |
984 |
|
991 | |||
985 | Mercurial uses either atomic renames or appends for files under .hg, |
|
992 | Mercurial uses either atomic renames or appends for files under .hg, | |
986 | so to ensure the cache is reliable we need the filesystem to be able |
|
993 | so to ensure the cache is reliable we need the filesystem to be able | |
987 | to tell us if a file has been replaced. If it can't, we fall back to |
|
994 | to tell us if a file has been replaced. If it can't, we fall back to | |
988 | recreating the object on every call (essentially the same behaviour as |
|
995 | recreating the object on every call (essentially the same behaviour as | |
989 | propertycache). |
|
996 | propertycache). | |
990 |
|
997 | |||
991 | ''' |
|
998 | ''' | |
992 | def __init__(self, *paths): |
|
999 | def __init__(self, *paths): | |
993 | self.paths = paths |
|
1000 | self.paths = paths | |
994 |
|
1001 | |||
995 | def join(self, obj, fname): |
|
1002 | def join(self, obj, fname): | |
996 | """Used to compute the runtime path of a cached file. |
|
1003 | """Used to compute the runtime path of a cached file. | |
997 |
|
1004 | |||
998 | Users should subclass filecache and provide their own version of this |
|
1005 | Users should subclass filecache and provide their own version of this | |
999 | function to call the appropriate join function on 'obj' (an instance |
|
1006 | function to call the appropriate join function on 'obj' (an instance | |
1000 | of the class whose member function was decorated). |
|
1007 | of the class whose member function was decorated). | |
1001 | """ |
|
1008 | """ | |
1002 | return obj.join(fname) |
|
1009 | return obj.join(fname) | |
1003 |
|
1010 | |||
1004 | def __call__(self, func): |
|
1011 | def __call__(self, func): | |
1005 | self.func = func |
|
1012 | self.func = func | |
1006 | self.name = func.__name__ |
|
1013 | self.name = func.__name__ | |
1007 | return self |
|
1014 | return self | |
1008 |
|
1015 | |||
1009 | def __get__(self, obj, type=None): |
|
1016 | def __get__(self, obj, type=None): | |
1010 | # do we need to check if the file changed? |
|
1017 | # do we need to check if the file changed? | |
1011 | if self.name in obj.__dict__: |
|
1018 | if self.name in obj.__dict__: | |
1012 | assert self.name in obj._filecache, self.name |
|
1019 | assert self.name in obj._filecache, self.name | |
1013 | return obj.__dict__[self.name] |
|
1020 | return obj.__dict__[self.name] | |
1014 |
|
1021 | |||
1015 | entry = obj._filecache.get(self.name) |
|
1022 | entry = obj._filecache.get(self.name) | |
1016 |
|
1023 | |||
1017 | if entry: |
|
1024 | if entry: | |
1018 | if entry.changed(): |
|
1025 | if entry.changed(): | |
1019 | entry.obj = self.func(obj) |
|
1026 | entry.obj = self.func(obj) | |
1020 | else: |
|
1027 | else: | |
1021 | paths = [self.join(obj, path) for path in self.paths] |
|
1028 | paths = [self.join(obj, path) for path in self.paths] | |
1022 |
|
1029 | |||
1023 | # We stat -before- creating the object so our cache doesn't lie if |
|
1030 | # We stat -before- creating the object so our cache doesn't lie if | |
1024 | # a writer modified it between the time we read and stat |
|
1031 | # a writer modified it between the time we read and stat | |
1025 | entry = filecacheentry(paths, True) |
|
1032 | entry = filecacheentry(paths, True) | |
1026 | entry.obj = self.func(obj) |
|
1033 | entry.obj = self.func(obj) | |
1027 |
|
1034 | |||
1028 | obj._filecache[self.name] = entry |
|
1035 | obj._filecache[self.name] = entry | |
1029 |
|
1036 | |||
1030 | obj.__dict__[self.name] = entry.obj |
|
1037 | obj.__dict__[self.name] = entry.obj | |
1031 | return entry.obj |
|
1038 | return entry.obj | |
1032 |
|
1039 | |||
1033 | def __set__(self, obj, value): |
|
1040 | def __set__(self, obj, value): | |
1034 | if self.name not in obj._filecache: |
|
1041 | if self.name not in obj._filecache: | |
1035 | # we add an entry for the missing value because X in __dict__ |
|
1042 | # we add an entry for the missing value because X in __dict__ | |
1036 | # implies X in _filecache |
|
1043 | # implies X in _filecache | |
1037 | paths = [self.join(obj, path) for path in self.paths] |
|
1044 | paths = [self.join(obj, path) for path in self.paths] | |
1038 | ce = filecacheentry(paths, False) |
|
1045 | ce = filecacheentry(paths, False) | |
1039 | obj._filecache[self.name] = ce |
|
1046 | obj._filecache[self.name] = ce | |
1040 | else: |
|
1047 | else: | |
1041 | ce = obj._filecache[self.name] |
|
1048 | ce = obj._filecache[self.name] | |
1042 |
|
1049 | |||
1043 | ce.obj = value # update cached copy |
|
1050 | ce.obj = value # update cached copy | |
1044 | obj.__dict__[self.name] = value # update copy returned by obj.x |
|
1051 | obj.__dict__[self.name] = value # update copy returned by obj.x | |
1045 |
|
1052 | |||
1046 | def __delete__(self, obj): |
|
1053 | def __delete__(self, obj): | |
1047 | try: |
|
1054 | try: | |
1048 | del obj.__dict__[self.name] |
|
1055 | del obj.__dict__[self.name] | |
1049 | except KeyError: |
|
1056 | except KeyError: | |
1050 | raise AttributeError(self.name) |
|
1057 | raise AttributeError(self.name) | |
1051 |
|
1058 | |||
1052 | class dirs(object): |
|
1059 | class dirs(object): | |
1053 | '''a multiset of directory names from a dirstate or manifest''' |
|
1060 | '''a multiset of directory names from a dirstate or manifest''' | |
1054 |
|
1061 | |||
1055 | def __init__(self, map, skip=None): |
|
1062 | def __init__(self, map, skip=None): | |
1056 | self._dirs = {} |
|
1063 | self._dirs = {} | |
1057 | addpath = self.addpath |
|
1064 | addpath = self.addpath | |
1058 | if util.safehasattr(map, 'iteritems') and skip is not None: |
|
1065 | if util.safehasattr(map, 'iteritems') and skip is not None: | |
1059 | for f, s in map.iteritems(): |
|
1066 | for f, s in map.iteritems(): | |
1060 | if s[0] != skip: |
|
1067 | if s[0] != skip: | |
1061 | addpath(f) |
|
1068 | addpath(f) | |
1062 | else: |
|
1069 | else: | |
1063 | for f in map: |
|
1070 | for f in map: | |
1064 | addpath(f) |
|
1071 | addpath(f) | |
1065 |
|
1072 | |||
1066 | def addpath(self, path): |
|
1073 | def addpath(self, path): | |
1067 | dirs = self._dirs |
|
1074 | dirs = self._dirs | |
1068 | for base in finddirs(path): |
|
1075 | for base in finddirs(path): | |
1069 | if base in dirs: |
|
1076 | if base in dirs: | |
1070 | dirs[base] += 1 |
|
1077 | dirs[base] += 1 | |
1071 | return |
|
1078 | return | |
1072 | dirs[base] = 1 |
|
1079 | dirs[base] = 1 | |
1073 |
|
1080 | |||
1074 | def delpath(self, path): |
|
1081 | def delpath(self, path): | |
1075 | dirs = self._dirs |
|
1082 | dirs = self._dirs | |
1076 | for base in finddirs(path): |
|
1083 | for base in finddirs(path): | |
1077 | if dirs[base] > 1: |
|
1084 | if dirs[base] > 1: | |
1078 | dirs[base] -= 1 |
|
1085 | dirs[base] -= 1 | |
1079 | return |
|
1086 | return | |
1080 | del dirs[base] |
|
1087 | del dirs[base] | |
1081 |
|
1088 | |||
1082 | def __iter__(self): |
|
1089 | def __iter__(self): | |
1083 | return self._dirs.iterkeys() |
|
1090 | return self._dirs.iterkeys() | |
1084 |
|
1091 | |||
1085 | def __contains__(self, d): |
|
1092 | def __contains__(self, d): | |
1086 | return d in self._dirs |
|
1093 | return d in self._dirs | |
1087 |
|
1094 | |||
1088 | if util.safehasattr(parsers, 'dirs'): |
|
1095 | if util.safehasattr(parsers, 'dirs'): | |
1089 | dirs = parsers.dirs |
|
1096 | dirs = parsers.dirs | |
1090 |
|
1097 | |||
1091 | def finddirs(path): |
|
1098 | def finddirs(path): | |
1092 | pos = path.rfind('/') |
|
1099 | pos = path.rfind('/') | |
1093 | while pos != -1: |
|
1100 | while pos != -1: | |
1094 | yield path[:pos] |
|
1101 | yield path[:pos] | |
1095 | pos = path.rfind('/', 0, pos) |
|
1102 | pos = path.rfind('/', 0, pos) |
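The filecache decorator defined above expects its host object to provide a join() method and a _filecache dictionary, as its docstring and its __get__/__set__ implementations require. The following minimal sketch (not part of this changeset) shows that contract in use; the tracker class, the 'settings' file name and the temporary directory are illustrative assumptions only, and it assumes Mercurial is importable:

import os
import tempfile

from mercurial import scmutil

class tracker(object):
    def __init__(self, path):
        self._path = path        # directory holding the tracked file
        self._filecache = {}     # required: filecache stores stat entries here

    def join(self, fname):
        # filecache.join() resolves tracked names through obj.join()
        return os.path.join(self._path, fname)

    @scmutil.filecache('settings')
    def settings(self):
        # recomputed only when the stat info of 'settings' indicates a change
        with open(self.join('settings')) as fp:
            return fp.read()

d = tempfile.mkdtemp()
with open(os.path.join(d, 'settings'), 'w') as fp:
    fp.write('ui.username = example\n')

t = tracker(d)
print(t.settings)    # first access stats the file and caches the result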
@@ -1,498 +1,498 | |||||
1 | # transaction.py - simple journaling scheme for mercurial |
|
1 | # transaction.py - simple journaling scheme for mercurial | |
2 | # |
|
2 | # | |
3 | # This transaction scheme is intended to gracefully handle program |
|
3 | # This transaction scheme is intended to gracefully handle program | |
4 | # errors and interruptions. More serious failures like system crashes |
|
4 | # errors and interruptions. More serious failures like system crashes | |
5 | # can be recovered with an fsck-like tool. As the whole repository is |
|
5 | # can be recovered with an fsck-like tool. As the whole repository is | |
6 | # effectively log-structured, this should amount to simply truncating |
|
6 | # effectively log-structured, this should amount to simply truncating | |
7 | # anything that isn't referenced in the changelog. |
|
7 | # anything that isn't referenced in the changelog. | |
8 | # |
|
8 | # | |
9 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
9 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
10 | # |
|
10 | # | |
11 | # This software may be used and distributed according to the terms of the |
|
11 | # This software may be used and distributed according to the terms of the | |
12 | # GNU General Public License version 2 or any later version. |
|
12 | # GNU General Public License version 2 or any later version. | |
13 |
|
13 | |||
14 | from i18n import _ |
|
14 | from i18n import _ | |
15 | import os |
|
15 | import os | |
16 | import errno |
|
16 | import errno | |
17 | import error, util |
|
17 | import error, util | |
18 |
|
18 | |||
19 | version = 2 |
|
19 | version = 2 | |
20 |
|
20 | |||
21 | def active(func): |
|
21 | def active(func): | |
22 | def _active(self, *args, **kwds): |
|
22 | def _active(self, *args, **kwds): | |
23 | if self.count == 0: |
|
23 | if self.count == 0: | |
24 | raise error.Abort(_( |
|
24 | raise error.Abort(_( | |
25 | 'cannot use transaction when it is already committed/aborted')) |
|
25 | 'cannot use transaction when it is already committed/aborted')) | |
26 | return func(self, *args, **kwds) |
|
26 | return func(self, *args, **kwds) | |
27 | return _active |
|
27 | return _active | |
28 |
|
28 | |||
29 | def _playback(journal, report, opener, vfsmap, entries, backupentries, |
|
29 | def _playback(journal, report, opener, vfsmap, entries, backupentries, | |
30 | unlink=True): |
|
30 | unlink=True): | |
31 | for f, o, _ignore in entries: |
|
31 | for f, o, _ignore in entries: | |
32 | if o or not unlink: |
|
32 | if o or not unlink: | |
33 | try: |
|
33 | try: | |
34 | fp = opener(f, 'a') |
|
34 | fp = opener(f, 'a') | |
35 | fp.truncate(o) |
|
35 | fp.truncate(o) | |
36 | fp.close() |
|
36 | fp.close() | |
37 | except IOError: |
|
37 | except IOError: | |
38 | report(_("failed to truncate %s\n") % f) |
|
38 | report(_("failed to truncate %s\n") % f) | |
39 | raise |
|
39 | raise | |
40 | else: |
|
40 | else: | |
41 | try: |
|
41 | try: | |
42 | opener.unlink(f) |
|
42 | opener.unlink(f) | |
43 | except (IOError, OSError), inst: |
|
43 | except (IOError, OSError), inst: | |
44 | if inst.errno != errno.ENOENT: |
|
44 | if inst.errno != errno.ENOENT: | |
45 | raise |
|
45 | raise | |
46 |
|
46 | |||
47 | backupfiles = [] |
|
47 | backupfiles = [] | |
48 | for l, f, b, c in backupentries: |
|
48 | for l, f, b, c in backupentries: | |
49 | if l not in vfsmap and c: |
|
49 | if l not in vfsmap and c: | |
50 | report("couldn't handle %s: unknown cache location %s\n" |
|
50 | report("couldn't handle %s: unknown cache location %s\n" | |
51 | % (b, l)) |
|
51 | % (b, l)) | |
52 | vfs = vfsmap[l] |
|
52 | vfs = vfsmap[l] | |
53 | try: |
|
53 | try: | |
54 | if f and b: |
|
54 | if f and b: | |
55 | filepath = vfs.join(f) |
|
55 | filepath = vfs.join(f) | |
56 | backuppath = vfs.join(b) |
|
56 | backuppath = vfs.join(b) | |
57 | try: |
|
57 | try: | |
58 | util.copyfile(backuppath, filepath) |
|
58 | util.copyfile(backuppath, filepath) | |
59 | backupfiles.append(b) |
|
59 | backupfiles.append(b) | |
60 | except IOError: |
|
60 | except IOError: | |
61 | report(_("failed to recover %s\n") % f) |
|
61 | report(_("failed to recover %s\n") % f) | |
62 | else: |
|
62 | else: | |
63 | target = f or b |
|
63 | target = f or b | |
64 | try: |
|
64 | try: | |
65 | vfs.unlink(target) |
|
65 | vfs.unlink(target) | |
66 | except (IOError, OSError), inst: |
|
66 | except (IOError, OSError), inst: | |
67 | if inst.errno != errno.ENOENT: |
|
67 | if inst.errno != errno.ENOENT: | |
68 | raise |
|
68 | raise | |
69 | except (IOError, OSError, util.Abort), inst: |
|
69 | except (IOError, OSError, util.Abort), inst: | |
70 | if not c: |
|
70 | if not c: | |
71 | raise |
|
71 | raise | |
72 |
|
72 | |||
73 | opener.unlink(journal) |
|
73 | opener.unlink(journal) | |
74 | backuppath = "%s.backupfiles" % journal |
|
74 | backuppath = "%s.backupfiles" % journal | |
75 | if opener.exists(backuppath): |
|
75 | if opener.exists(backuppath): | |
76 | opener.unlink(backuppath) |
|
76 | opener.unlink(backuppath) | |
77 | try: |
|
77 | try: | |
78 | for f in backupfiles: |
|
78 | for f in backupfiles: | |
79 | if opener.exists(f): |
|
79 | if opener.exists(f): | |
80 | opener.unlink(f) |
|
80 | opener.unlink(f) | |
81 | except (IOError, OSError, util.Abort), inst: |
|
81 | except (IOError, OSError, util.Abort), inst: | |
82 | # only the pure backup file remains; it is safe to ignore any error |
|
82 | # only the pure backup file remains; it is safe to ignore any error | |
83 | pass |
|
83 | pass | |
84 |
|
84 | |||
85 | class transaction(object): |
|
85 | class transaction(object): | |
86 | def __init__(self, report, opener, vfsmap, journal, after=None, |
|
86 | def __init__(self, report, opener, vfsmap, journal, after=None, | |
87 | createmode=None): |
|
87 | createmode=None): | |
88 | """Begin a new transaction |
|
88 | """Begin a new transaction | |
89 |
|
89 | |||
90 | Begins a new transaction that allows rolling back writes in the event of |
|
90 | Begins a new transaction that allows rolling back writes in the event of | |
91 | an exception. |
|
91 | an exception. | |
92 |
|
92 | |||
93 | * `after`: called after the transaction has been committed |
|
93 | * `after`: called after the transaction has been committed | |
94 | * `createmode`: the mode of the journal file that will be created |
|
94 | * `createmode`: the mode of the journal file that will be created | |
95 | """ |
|
95 | """ | |
96 | self.count = 1 |
|
96 | self.count = 1 | |
97 | self.usages = 1 |
|
97 | self.usages = 1 | |
98 | self.report = report |
|
98 | self.report = report | |
99 | # a vfs to the store content |
|
99 | # a vfs to the store content | |
100 | self.opener = opener |
|
100 | self.opener = opener | |
101 | # a map to access files in various locations {location -> vfs} |
|
101 | # a map to access files in various locations {location -> vfs} | |
102 | vfsmap = vfsmap.copy() |
|
102 | vfsmap = vfsmap.copy() | |
103 | vfsmap[''] = opener # set default value |
|
103 | vfsmap[''] = opener # set default value | |
104 | self._vfsmap = vfsmap |
|
104 | self._vfsmap = vfsmap | |
105 | self.after = after |
|
105 | self.after = after | |
106 | self.entries = [] |
|
106 | self.entries = [] | |
107 | self.map = {} |
|
107 | self.map = {} | |
108 | self.journal = journal |
|
108 | self.journal = journal | |
109 | self._queue = [] |
|
109 | self._queue = [] | |
110 | # a dict of arguments to be passed to hooks |
|
110 | # a dict of arguments to be passed to hooks | |
111 | self.hookargs = {} |
|
111 | self.hookargs = {} | |
112 | self.file = opener.open(self.journal, "w") |
|
112 | self.file = opener.open(self.journal, "w") | |
113 |
|
113 | |||
114 | # a list of ('location', 'path', 'backuppath', cache) entries. |
|
114 | # a list of ('location', 'path', 'backuppath', cache) entries. | |
115 | # - if 'backuppath' is empty, no file existed at backup time |
|
115 | # - if 'backuppath' is empty, no file existed at backup time | |
116 | # - if 'path' is empty, this is a temporary transaction file |
|
116 | # - if 'path' is empty, this is a temporary transaction file | |
117 | # - if 'location' is not empty, the path is outside main opener reach. |
|
117 | # - if 'location' is not empty, the path is outside main opener reach. | |
118 | # use 'location' value as a key in a vfsmap to find the right 'vfs' |
|
118 | # use 'location' value as a key in a vfsmap to find the right 'vfs' | |
119 | # (cache is currently unused) |
|
119 | # (cache is currently unused) | |
120 | self._backupentries = [] |
|
120 | self._backupentries = [] | |
121 | self._backupmap = {} |
|
121 | self._backupmap = {} | |
122 | self._backupjournal = "%s.backupfiles" % journal |
|
122 | self._backupjournal = "%s.backupfiles" % journal | |
123 | self._backupsfile = opener.open(self._backupjournal, 'w') |
|
123 | self._backupsfile = opener.open(self._backupjournal, 'w') | |
124 | self._backupsfile.write('%d\n' % version) |
|
124 | self._backupsfile.write('%d\n' % version) | |
125 |
|
125 | |||
126 | if createmode is not None: |
|
126 | if createmode is not None: | |
127 | opener.chmod(self.journal, createmode & 0666) |
|
127 | opener.chmod(self.journal, createmode & 0666) | |
128 | opener.chmod(self._backupjournal, createmode & 0666) |
|
128 | opener.chmod(self._backupjournal, createmode & 0666) | |
129 |
|
129 | |||
130 | # hold file generations to be performed on commit |
|
130 | # hold file generations to be performed on commit | |
131 | self._filegenerators = {} |
|
131 | self._filegenerators = {} | |
132 | # hold callback to write pending data for hooks |
|
132 | # hold callback to write pending data for hooks | |
133 | self._pendingcallback = {} |
|
133 | self._pendingcallback = {} | |
134 | # True if any pending data has ever been written |
|
134 | # True if any pending data has ever been written | |
135 | self._anypending = False |
|
135 | self._anypending = False | |
136 | # holds callback to call when writing the transaction |
|
136 | # holds callback to call when writing the transaction | |
137 | self._finalizecallback = {} |
|
137 | self._finalizecallback = {} | |
138 | # hold callback for post transaction close |
|
138 | # hold callback for post transaction close | |
139 | self._postclosecallback = {} |
|
139 | self._postclosecallback = {} | |
140 |
|
140 | |||
141 | def __del__(self): |
|
141 | def __del__(self): | |
142 | if self.journal: |
|
142 | if self.journal: | |
143 | self._abort() |
|
143 | self._abort() | |
144 |
|
144 | |||
145 | @active |
|
145 | @active | |
146 | def startgroup(self): |
|
146 | def startgroup(self): | |
147 | """delay registration of file entry |
|
147 | """delay registration of file entry | |
148 |
|
148 | |||
149 | This is used by strip to delay visibility of the strip offset. The transaction |
|
149 | This is used by strip to delay visibility of the strip offset. The transaction | |
150 | sees either none or all of the strip actions to be done.""" |
|
150 | sees either none or all of the strip actions to be done.""" | |
151 | self._queue.append([]) |
|
151 | self._queue.append([]) | |
152 |
|
152 | |||
153 | @active |
|
153 | @active | |
154 | def endgroup(self): |
|
154 | def endgroup(self): | |
155 | """apply delayed registration of file entry. |
|
155 | """apply delayed registration of file entry. | |
156 |
|
156 | |||
157 | This is used by strip to delay visibility of the strip offset. The transaction |
|
157 | This is used by strip to delay visibility of the strip offset. The transaction | |
158 | sees either none or all of the strip actions to be done.""" |
|
158 | sees either none or all of the strip actions to be done.""" | |
159 | q = self._queue.pop() |
|
159 | q = self._queue.pop() | |
160 | for f, o, data in q: |
|
160 | for f, o, data in q: | |
161 | self._addentry(f, o, data) |
|
161 | self._addentry(f, o, data) | |
162 |
|
162 | |||
163 | @active |
|
163 | @active | |
164 | def add(self, file, offset, data=None): |
|
164 | def add(self, file, offset, data=None): | |
165 | """record the state of an append-only file before update""" |
|
165 | """record the state of an append-only file before update""" | |
166 | if file in self.map or file in self._backupmap: |
|
166 | if file in self.map or file in self._backupmap: | |
167 | return |
|
167 | return | |
168 | if self._queue: |
|
168 | if self._queue: | |
169 | self._queue[-1].append((file, offset, data)) |
|
169 | self._queue[-1].append((file, offset, data)) | |
170 | return |
|
170 | return | |
171 |
|
171 | |||
172 | self._addentry(file, offset, data) |
|
172 | self._addentry(file, offset, data) | |
173 |
|
173 | |||
174 | def _addentry(self, file, offset, data): |
|
174 | def _addentry(self, file, offset, data): | |
175 | """add an append-only entry to memory and on-disk state""" |
|
175 | """add an append-only entry to memory and on-disk state""" | |
176 | if file in self.map or file in self._backupmap: |
|
176 | if file in self.map or file in self._backupmap: | |
177 | return |
|
177 | return | |
178 | self.entries.append((file, offset, data)) |
|
178 | self.entries.append((file, offset, data)) | |
179 | self.map[file] = len(self.entries) - 1 |
|
179 | self.map[file] = len(self.entries) - 1 | |
180 | # add enough data to the journal to do the truncate |
|
180 | # add enough data to the journal to do the truncate | |
181 | self.file.write("%s\0%d\n" % (file, offset)) |
|
181 | self.file.write("%s\0%d\n" % (file, offset)) | |
182 | self.file.flush() |
|
182 | self.file.flush() | |
183 |
|
183 | |||
184 | @active |
|
184 | @active | |
185 | def addbackup(self, file, hardlink=True, location=''): |
|
185 | def addbackup(self, file, hardlink=True, location=''): | |
186 | """Adds a backup of the file to the transaction |
|
186 | """Adds a backup of the file to the transaction | |
187 |
|
187 | |||
188 | Calling addbackup() creates a hardlink backup of the specified file |
|
188 | Calling addbackup() creates a hardlink backup of the specified file | |
189 | that is used to recover the file in the event of the transaction |
|
189 | that is used to recover the file in the event of the transaction | |
190 | aborting. |
|
190 | aborting. | |
191 |
|
191 | |||
192 | * `file`: the file path, relative to .hg/store |
|
192 | * `file`: the file path, relative to .hg/store | |
193 | * `hardlink`: use a hardlink to quickly create the backup |
|
193 | * `hardlink`: use a hardlink to quickly create the backup | |
194 | """ |
|
194 | """ | |
195 | if self._queue: |
|
195 | if self._queue: | |
196 | msg = 'cannot use transaction.addbackup inside "group"' |
|
196 | msg = 'cannot use transaction.addbackup inside "group"' | |
197 | raise RuntimeError(msg) |
|
197 | raise RuntimeError(msg) | |
198 |
|
198 | |||
199 | if file in self.map or file in self._backupmap: |
|
199 | if file in self.map or file in self._backupmap: | |
200 | return |
|
200 | return | |
201 | dirname, filename = os.path.split(file) |
|
201 | dirname, filename = os.path.split(file) | |
202 | backupfilename = "%s.backup.%s" % (self.journal, filename) |
|
202 | backupfilename = "%s.backup.%s" % (self.journal, filename) | |
203 | backupfile = os.path.join(dirname, backupfilename) |
|
203 | vfs = self._vfsmap[location] | |
204 | vfs = self._vfsmap[location] |
|
204 | backupfile = vfs.reljoin(dirname, backupfilename) | |
205 | if vfs.exists(file): |
|
205 | if vfs.exists(file): | |
206 | filepath = vfs.join(file) |
|
206 | filepath = vfs.join(file) | |
207 | backuppath = vfs.join(backupfile) |
|
207 | backuppath = vfs.join(backupfile) | |
208 | util.copyfiles(filepath, backuppath, hardlink=hardlink) |
|
208 | util.copyfiles(filepath, backuppath, hardlink=hardlink) | |
209 | else: |
|
209 | else: | |
210 | backupfile = '' |
|
210 | backupfile = '' | |
211 |
|
211 | |||
212 | self._addbackupentry((location, file, backupfile, False)) |
|
212 | self._addbackupentry((location, file, backupfile, False)) | |
213 |
|
213 | |||
214 | def _addbackupentry(self, entry): |
|
214 | def _addbackupentry(self, entry): | |
215 | """register a new backup entry and write it to disk""" |
|
215 | """register a new backup entry and write it to disk""" | |
216 | self._backupentries.append(entry) |
|
216 | self._backupentries.append(entry) | |
217 | self._backupmap[entry[1]] = len(self._backupentries) - 1 |
|
217 | self._backupmap[entry[1]] = len(self._backupentries) - 1 | |
218 | self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry) |
|
218 | self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry) | |
219 | self._backupsfile.flush() |
|
219 | self._backupsfile.flush() | |
220 |
|
220 | |||
221 | @active |
|
221 | @active | |
222 | def registertmp(self, tmpfile, location=''): |
|
222 | def registertmp(self, tmpfile, location=''): | |
223 | """register a temporary transaction file |
|
223 | """register a temporary transaction file | |
224 |
|
224 | |||
225 | Such files will be deleted when the transaction exits (on both |
|
225 | Such files will be deleted when the transaction exits (on both | |
226 | failure and success). |
|
226 | failure and success). | |
227 | """ |
|
227 | """ | |
228 | self._addbackupentry((location, '', tmpfile, False)) |
|
228 | self._addbackupentry((location, '', tmpfile, False)) | |
229 |
|
229 | |||
230 | @active |
|
230 | @active | |
231 | def addfilegenerator(self, genid, filenames, genfunc, order=0, |
|
231 | def addfilegenerator(self, genid, filenames, genfunc, order=0, | |
232 | location=''): |
|
232 | location=''): | |
233 | """add a function to generate some files at transaction commit |
|
233 | """add a function to generate some files at transaction commit | |
234 |
|
234 | |||
235 | The `genfunc` argument is a function capable of generating proper |
|
235 | The `genfunc` argument is a function capable of generating proper | |
236 | content for each entry in the `filenames` tuple. |
|
236 | content for each entry in the `filenames` tuple. | |
237 |
|
237 | |||
238 | At transaction close time, `genfunc` will be called with one file |
|
238 | At transaction close time, `genfunc` will be called with one file | |
239 | object argument per entry in `filenames`. |
|
239 | object argument per entry in `filenames`. | |
240 |
|
240 | |||
241 | The transaction itself is responsible for the backup, creation and |
|
241 | The transaction itself is responsible for the backup, creation and | |
242 | final write of such files. |
|
242 | final write of such files. | |
243 |
|
243 | |||
244 | The `genid` argument is used to ensure the same set of files is only |
|
244 | The `genid` argument is used to ensure the same set of files is only | |
245 | generated once. A call to `addfilegenerator` for a `genid` already |
|
245 | generated once. A call to `addfilegenerator` for a `genid` already | |
246 | present will overwrite the old entry. |
|
246 | present will overwrite the old entry. | |
247 |
|
247 | |||
248 | The `order` argument may be used to control the order in which multiple |
|
248 | The `order` argument may be used to control the order in which multiple | |
249 | generators will be executed. |
|
249 | generators will be executed. | |
250 |
|
250 | |||
251 | The `location` argument may be used to indicate that the files are located |
|
251 | The `location` argument may be used to indicate that the files are located | |
252 | outside of the standard directory for transactions. It should match |
|
252 | outside of the standard directory for transactions. It should match | |
253 | one of the keys of the `transaction.vfsmap` dictionary. |
|
253 | one of the keys of the `transaction.vfsmap` dictionary. | |
254 | """ |
|
254 | """ | |
255 | # For now, we are unable to do proper backup and restore of custom vfs |
|
255 | # For now, we are unable to do proper backup and restore of custom vfs | |
256 | # except for bookmarks, which are handled outside this mechanism. |
|
256 | # except for bookmarks, which are handled outside this mechanism. | |
257 | self._filegenerators[genid] = (order, filenames, genfunc, location) |
|
257 | self._filegenerators[genid] = (order, filenames, genfunc, location) | |
258 |
|
258 | |||
259 | def _generatefiles(self, suffix=''): |
|
259 | def _generatefiles(self, suffix=''): | |
260 | # write files registered for generation |
|
260 | # write files registered for generation | |
261 | any = False |
|
261 | any = False | |
262 | for entry in sorted(self._filegenerators.values()): |
|
262 | for entry in sorted(self._filegenerators.values()): | |
263 | any = True |
|
263 | any = True | |
264 | order, filenames, genfunc, location = entry |
|
264 | order, filenames, genfunc, location = entry | |
265 | vfs = self._vfsmap[location] |
|
265 | vfs = self._vfsmap[location] | |
266 | files = [] |
|
266 | files = [] | |
267 | try: |
|
267 | try: | |
268 | for name in filenames: |
|
268 | for name in filenames: | |
269 | name += suffix |
|
269 | name += suffix | |
270 | if suffix: |
|
270 | if suffix: | |
271 | self.registertmp(name, location=location) |
|
271 | self.registertmp(name, location=location) | |
272 | else: |
|
272 | else: | |
273 | self.addbackup(name, location=location) |
|
273 | self.addbackup(name, location=location) | |
274 | files.append(vfs(name, 'w', atomictemp=True)) |
|
274 | files.append(vfs(name, 'w', atomictemp=True)) | |
275 | genfunc(*files) |
|
275 | genfunc(*files) | |
276 | finally: |
|
276 | finally: | |
277 | for f in files: |
|
277 | for f in files: | |
278 | f.close() |
|
278 | f.close() | |
279 | return any |
|
279 | return any | |
280 |
|
280 | |||
281 | @active |
|
281 | @active | |
282 | def find(self, file): |
|
282 | def find(self, file): | |
283 | if file in self.map: |
|
283 | if file in self.map: | |
284 | return self.entries[self.map[file]] |
|
284 | return self.entries[self.map[file]] | |
285 | if file in self._backupmap: |
|
285 | if file in self._backupmap: | |
286 | return self._backupentries[self._backupmap[file]] |
|
286 | return self._backupentries[self._backupmap[file]] | |
287 | return None |
|
287 | return None | |
288 |
|
288 | |||
289 | @active |
|
289 | @active | |
290 | def replace(self, file, offset, data=None): |
|
290 | def replace(self, file, offset, data=None): | |
291 | ''' |
|
291 | ''' | |
292 | replace can only replace already committed entries |
|
292 | replace can only replace already committed entries | |
293 | that are not pending in the queue |
|
293 | that are not pending in the queue | |
294 | ''' |
|
294 | ''' | |
295 |
|
295 | |||
296 | if file not in self.map: |
|
296 | if file not in self.map: | |
297 | raise KeyError(file) |
|
297 | raise KeyError(file) | |
298 | index = self.map[file] |
|
298 | index = self.map[file] | |
299 | self.entries[index] = (file, offset, data) |
|
299 | self.entries[index] = (file, offset, data) | |
300 | self.file.write("%s\0%d\n" % (file, offset)) |
|
300 | self.file.write("%s\0%d\n" % (file, offset)) | |
301 | self.file.flush() |
|
301 | self.file.flush() | |
302 |
|
302 | |||
303 | @active |
|
303 | @active | |
304 | def nest(self): |
|
304 | def nest(self): | |
305 | self.count += 1 |
|
305 | self.count += 1 | |
306 | self.usages += 1 |
|
306 | self.usages += 1 | |
307 | return self |
|
307 | return self | |
308 |
|
308 | |||
309 | def release(self): |
|
309 | def release(self): | |
310 | if self.count > 0: |
|
310 | if self.count > 0: | |
311 | self.usages -= 1 |
|
311 | self.usages -= 1 | |
312 | # if the transaction scopes are left without being closed, fail |
|
312 | # if the transaction scopes are left without being closed, fail | |
313 | if self.count > 0 and self.usages == 0: |
|
313 | if self.count > 0 and self.usages == 0: | |
314 | self._abort() |
|
314 | self._abort() | |
315 |
|
315 | |||
316 | def running(self): |
|
316 | def running(self): | |
317 | return self.count > 0 |
|
317 | return self.count > 0 | |
318 |
|
318 | |||
319 | def addpending(self, category, callback): |
|
319 | def addpending(self, category, callback): | |
320 | """add a callback to be called when the transaction is pending |
|
320 | """add a callback to be called when the transaction is pending | |
321 |
|
321 | |||
322 | The transaction will be given as callback's first argument. |
|
322 | The transaction will be given as callback's first argument. | |
323 |
|
323 | |||
324 | Category is a unique identifier to allow overwriting an old callback |
|
324 | Category is a unique identifier to allow overwriting an old callback | |
325 | with a newer callback. |
|
325 | with a newer callback. | |
326 | """ |
|
326 | """ | |
327 | self._pendingcallback[category] = callback |
|
327 | self._pendingcallback[category] = callback | |
328 |
|
328 | |||
329 | @active |
|
329 | @active | |
330 | def writepending(self): |
|
330 | def writepending(self): | |
331 | '''write pending file to temporary version |
|
331 | '''write pending file to temporary version | |
332 |
|
332 | |||
333 | This is used to allow hooks to view a transaction before commit''' |
|
333 | This is used to allow hooks to view a transaction before commit''' | |
334 | categories = sorted(self._pendingcallback) |
|
334 | categories = sorted(self._pendingcallback) | |
335 | for cat in categories: |
|
335 | for cat in categories: | |
336 | # remove callback since the data will have been flushed |
|
336 | # remove callback since the data will have been flushed | |
337 | any = self._pendingcallback.pop(cat)(self) |
|
337 | any = self._pendingcallback.pop(cat)(self) | |
338 | self._anypending = self._anypending or any |
|
338 | self._anypending = self._anypending or any | |
339 | self._anypending |= self._generatefiles(suffix='.pending') |
|
339 | self._anypending |= self._generatefiles(suffix='.pending') | |
340 | return self._anypending |
|
340 | return self._anypending | |
341 |
|
341 | |||
342 | @active |
|
342 | @active | |
343 | def addfinalize(self, category, callback): |
|
343 | def addfinalize(self, category, callback): | |
344 | """add a callback to be called when the transaction is closed |
|
344 | """add a callback to be called when the transaction is closed | |
345 |
|
345 | |||
346 | The transaction will be given as callback's first argument. |
|
346 | The transaction will be given as callback's first argument. | |
347 |
|
347 | |||
348 | Category is a unique identifier to allow overwriting old callbacks with |
|
348 | Category is a unique identifier to allow overwriting old callbacks with | |
349 | newer callbacks. |
|
349 | newer callbacks. | |
350 | """ |
|
350 | """ | |
351 | self._finalizecallback[category] = callback |
|
351 | self._finalizecallback[category] = callback | |
352 |
|
352 | |||
353 | @active |
|
353 | @active | |
354 | def addpostclose(self, category, callback): |
|
354 | def addpostclose(self, category, callback): | |
355 | """add a callback to be called after the transaction is closed |
|
355 | """add a callback to be called after the transaction is closed | |
356 |
|
356 | |||
357 | The transaction will be given as callback's first argument. |
|
357 | The transaction will be given as callback's first argument. | |
358 |
|
358 | |||
359 | Category is a unique identifier to allow overwriting an old callback |
|
359 | Category is a unique identifier to allow overwriting an old callback | |
360 | with a newer callback. |
|
360 | with a newer callback. | |
361 | """ |
|
361 | """ | |
362 | self._postclosecallback[category] = callback |
|
362 | self._postclosecallback[category] = callback | |
363 |
|
363 | |||
364 | @active |
|
364 | @active | |
365 | def close(self): |
|
365 | def close(self): | |
366 | '''commit the transaction''' |
|
366 | '''commit the transaction''' | |
367 | if self.count == 1: |
|
367 | if self.count == 1: | |
368 | self._generatefiles() |
|
368 | self._generatefiles() | |
369 | categories = sorted(self._finalizecallback) |
|
369 | categories = sorted(self._finalizecallback) | |
370 | for cat in categories: |
|
370 | for cat in categories: | |
371 | self._finalizecallback[cat](self) |
|
371 | self._finalizecallback[cat](self) | |
372 |
|
372 | |||
373 | self.count -= 1 |
|
373 | self.count -= 1 | |
374 | if self.count != 0: |
|
374 | if self.count != 0: | |
375 | return |
|
375 | return | |
376 | self.file.close() |
|
376 | self.file.close() | |
377 | self._backupsfile.close() |
|
377 | self._backupsfile.close() | |
378 | # cleanup temporary files |
|
378 | # cleanup temporary files | |
379 | for l, f, b, c in self._backupentries: |
|
379 | for l, f, b, c in self._backupentries: | |
380 | if l not in self._vfsmap and c: |
|
380 | if l not in self._vfsmap and c: | |
381 | self.report("couldn't remove %s: unknown cache location %s\n" |
|
381 | self.report("couldn't remove %s: unknown cache location %s\n" | |
382 | % (b, l)) |
|
382 | % (b, l)) | |
383 | continue |
|
383 | continue | |
384 | vfs = self._vfsmap[l] |
|
384 | vfs = self._vfsmap[l] | |
385 | if not f and b and vfs.exists(b): |
|
385 | if not f and b and vfs.exists(b): | |
386 | try: |
|
386 | try: | |
387 | vfs.unlink(b) |
|
387 | vfs.unlink(b) | |
388 | except (IOError, OSError, util.Abort), inst: |
|
388 | except (IOError, OSError, util.Abort), inst: | |
389 | if not c: |
|
389 | if not c: | |
390 | raise |
|
390 | raise | |
391 | # Abort may be raised by a read-only opener |
|
391 | # Abort may be raised by a read-only opener | |
392 | self.report("couldn't remove %s: %s\n" |
|
392 | self.report("couldn't remove %s: %s\n" | |
393 | % (vfs.join(b), inst)) |
|
393 | % (vfs.join(b), inst)) | |
394 | self.entries = [] |
|
394 | self.entries = [] | |
395 | if self.after: |
|
395 | if self.after: | |
396 | self.after() |
|
396 | self.after() | |
397 | if self.opener.isfile(self.journal): |
|
397 | if self.opener.isfile(self.journal): | |
398 | self.opener.unlink(self.journal) |
|
398 | self.opener.unlink(self.journal) | |
399 | if self.opener.isfile(self._backupjournal): |
|
399 | if self.opener.isfile(self._backupjournal): | |
400 | self.opener.unlink(self._backupjournal) |
|
400 | self.opener.unlink(self._backupjournal) | |
401 | for l, _f, b, c in self._backupentries: |
|
401 | for l, _f, b, c in self._backupentries: | |
402 | if l not in self._vfsmap and c: |
|
402 | if l not in self._vfsmap and c: | |
403 | self.report("couldn't remove %s: unknown cache location " |
|
403 | self.report("couldn't remove %s: unknown cache location " | |
404 | "%s\n" % (b, l)) |
|
404 | "%s\n" % (b, l)) | |
405 | continue |
|
405 | continue | |
406 | vfs = self._vfsmap[l] |
|
406 | vfs = self._vfsmap[l] | |
407 | if b and vfs.exists(b): |
|
407 | if b and vfs.exists(b): | |
408 | try: |
|
408 | try: | |
409 | vfs.unlink(b) |
|
409 | vfs.unlink(b) | |
410 | except (IOError, OSError, util.Abort), inst: |
|
410 | except (IOError, OSError, util.Abort), inst: | |
411 | if not c: |
|
411 | if not c: | |
412 | raise |
|
412 | raise | |
413 | # Abort may be raised by a read-only opener |
|
413 | # Abort may be raised by a read-only opener | |
414 | self.report("couldn't remove %s: %s\n" |
|
414 | self.report("couldn't remove %s: %s\n" | |
415 | % (vfs.join(b), inst)) |
|
415 | % (vfs.join(b), inst)) | |
416 | self._backupentries = [] |
|
416 | self._backupentries = [] | |
417 | self.journal = None |
|
417 | self.journal = None | |
418 | # run post close action |
|
418 | # run post close action | |
419 | categories = sorted(self._postclosecallback) |
|
419 | categories = sorted(self._postclosecallback) | |
420 | for cat in categories: |
|
420 | for cat in categories: | |
421 | self._postclosecallback[cat](self) |
|
421 | self._postclosecallback[cat](self) | |
422 |
|
422 | |||
423 | @active |
|
423 | @active | |
424 | def abort(self): |
|
424 | def abort(self): | |
425 | '''abort the transaction (generally called on error, or when the |
|
425 | '''abort the transaction (generally called on error, or when the | |
426 | transaction is not explicitly committed before going out of |
|
426 | transaction is not explicitly committed before going out of | |
427 | scope)''' |
|
427 | scope)''' | |
428 | self._abort() |
|
428 | self._abort() | |
429 |
|
429 | |||
430 | def _abort(self): |
|
430 | def _abort(self): | |
431 | self.count = 0 |
|
431 | self.count = 0 | |
432 | self.usages = 0 |
|
432 | self.usages = 0 | |
433 | self.file.close() |
|
433 | self.file.close() | |
434 | self._backupsfile.close() |
|
434 | self._backupsfile.close() | |
435 |
|
435 | |||
436 | try: |
|
436 | try: | |
437 | if not self.entries and not self._backupentries: |
|
437 | if not self.entries and not self._backupentries: | |
438 | if self.journal: |
|
438 | if self.journal: | |
439 | self.opener.unlink(self.journal) |
|
439 | self.opener.unlink(self.journal) | |
440 | if self._backupjournal: |
|
440 | if self._backupjournal: | |
441 | self.opener.unlink(self._backupjournal) |
|
441 | self.opener.unlink(self._backupjournal) | |
442 | return |
|
442 | return | |
443 |
|
443 | |||
444 | self.report(_("transaction abort!\n")) |
|
444 | self.report(_("transaction abort!\n")) | |
445 |
|
445 | |||
446 | try: |
|
446 | try: | |
447 | _playback(self.journal, self.report, self.opener, self._vfsmap, |
|
447 | _playback(self.journal, self.report, self.opener, self._vfsmap, | |
448 | self.entries, self._backupentries, False) |
|
448 | self.entries, self._backupentries, False) | |
449 | self.report(_("rollback completed\n")) |
|
449 | self.report(_("rollback completed\n")) | |
450 | except Exception: |
|
450 | except Exception: | |
451 | self.report(_("rollback failed - please run hg recover\n")) |
|
451 | self.report(_("rollback failed - please run hg recover\n")) | |
452 | finally: |
|
452 | finally: | |
453 | self.journal = None |
|
453 | self.journal = None | |
454 |
|
454 | |||
455 |
|
455 | |||
456 | def rollback(opener, vfsmap, file, report): |
|
456 | def rollback(opener, vfsmap, file, report): | |
457 | """Rolls back the transaction contained in the given file |
|
457 | """Rolls back the transaction contained in the given file | |
458 |
|
458 | |||
459 | Reads the entries in the specified file, and the corresponding |
|
459 | Reads the entries in the specified file, and the corresponding | |
460 | '*.backupfiles' file, to recover from an incomplete transaction. |
|
460 | '*.backupfiles' file, to recover from an incomplete transaction. | |
461 |
|
461 | |||
462 | * `file`: a file containing a list of entries, specifying where |
|
462 | * `file`: a file containing a list of entries, specifying where | |
463 | to truncate each file. The file should contain a list of |
|
463 | to truncate each file. The file should contain a list of | |
464 | file\0offset pairs, delimited by newlines. The corresponding |
|
464 | file\0offset pairs, delimited by newlines. The corresponding | |
465 | '*.backupfiles' file should contain a list of file\0backupfile |
|
465 | '*.backupfiles' file should contain a list of file\0backupfile | |
466 | pairs, delimited by \0. |
|
466 | pairs, delimited by \0. | |
467 | """ |
|
467 | """ | |
468 | entries = [] |
|
468 | entries = [] | |
469 | backupentries = [] |
|
469 | backupentries = [] | |
470 |
|
470 | |||
471 | fp = opener.open(file) |
|
471 | fp = opener.open(file) | |
472 | lines = fp.readlines() |
|
472 | lines = fp.readlines() | |
473 | fp.close() |
|
473 | fp.close() | |
474 | for l in lines: |
|
474 | for l in lines: | |
475 | try: |
|
475 | try: | |
476 | f, o = l.split('\0') |
|
476 | f, o = l.split('\0') | |
477 | entries.append((f, int(o), None)) |
|
477 | entries.append((f, int(o), None)) | |
478 | except ValueError: |
|
478 | except ValueError: | |
479 | report(_("couldn't read journal entry %r!\n") % l) |
|
479 | report(_("couldn't read journal entry %r!\n") % l) | |
480 |
|
480 | |||
481 | backupjournal = "%s.backupfiles" % file |
|
481 | backupjournal = "%s.backupfiles" % file | |
482 | if opener.exists(backupjournal): |
|
482 | if opener.exists(backupjournal): | |
483 | fp = opener.open(backupjournal) |
|
483 | fp = opener.open(backupjournal) | |
484 | lines = fp.readlines() |
|
484 | lines = fp.readlines() | |
485 | if lines: |
|
485 | if lines: | |
486 | ver = lines[0][:-1] |
|
486 | ver = lines[0][:-1] | |
487 | if ver == str(version): |
|
487 | if ver == str(version): | |
488 | for line in lines[1:]: |
|
488 | for line in lines[1:]: | |
489 | if line: |
|
489 | if line: | |
490 | # Shave off the trailing newline |
|
490 | # Shave off the trailing newline | |
491 | line = line[:-1] |
|
491 | line = line[:-1] | |
492 | l, f, b, c = line.split('\0') |
|
492 | l, f, b, c = line.split('\0') | |
493 | backupentries.append((l, f, b, bool(c))) |
|
493 | backupentries.append((l, f, b, bool(c))) | |
494 | else: |
|
494 | else: | |
495 | report(_("journal was created by a different version of " |
|
495 | report(_("journal was created by a different version of " | |
496 | "Mercurial")) |
|
496 | "Mercurial")) | |
497 |
|
497 | |||
498 | _playback(file, report, opener, vfsmap, entries, backupentries) |
|
498 | _playback(file, report, opener, vfsmap, entries, backupentries) |
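To make the journal mechanics above concrete, here is a minimal usage sketch (not part of this changeset) that drives the transaction class against a scratch directory. The directory, the 'journal' name and the report callback are illustrative assumptions; the opener is an scmutil.vfs, which supplies the open/unlink/exists calls the class relies on:

import tempfile

from mercurial import scmutil, transaction

def report(msg):
    print(msg)    # stand-in for ui.warn

d = tempfile.mkdtemp()
opener = scmutil.vfs(d)

opener.write('log', 'stable\n')               # pre-existing append-only file
tr = transaction.transaction(report, opener, {}, 'journal')
tr.add('log', len('stable\n'))                # offset to truncate back to on abort
fp = opener('log', 'a')
fp.write('tentative\n')                       # data appended inside the transaction
fp.close()

tr.abort()                                    # plays back the journal, truncating 'log'
print(opener.read('log'))                     # -> 'stable\n'

On success, tr.close() would be called instead; it removes the journal and its '*.backupfiles' sidecar rather than replaying them.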