
Commit ea377b9
geo-replication: replace logging.warn with logging.warning (#4344) (#4345)

logging.warn has been a deprecated alias for logging.warning since Python 3.3
and is slated for removal in Python 3.13.
jelly authored Jul 22, 2024
1 parent 599c608 commit ea377b9
Showing 4 changed files with 47 additions and 47 deletions.
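
For context, a minimal sketch (not part of this commit) of why the new spelling is the safe one on Python 3.3+ interpreters: the warn alias still logs, but emits a DeprecationWarning first. The snippet escalates that warning to an exception to make it visible; the messages are illustrative.

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)

    # Turn the DeprecationWarning into an exception so the alias is easy to spot.
    warnings.simplefilter("error", DeprecationWarning)

    logging.warning("supported spelling")  # logs normally

    try:
        logging.warn("deprecated spelling")  # deprecated alias since Python 3.3
    except DeprecationWarning as exc:
        print("caught:", exc)  # e.g. "The 'warn' function is deprecated, ..."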
2 changes: 1 addition & 1 deletion geo-replication/syncdaemon/gsyncd.py
@@ -296,7 +296,7 @@ def main():
                    )
 
     if config_file_error_msg is not None:
-        logging.warn(config_file_error_msg)
+        logging.warning(config_file_error_msg)
 
     # Log message for loaded config file
     if config_file is not None:
56 changes: 28 additions & 28 deletions geo-replication/syncdaemon/primary.py
@@ -542,8 +542,8 @@ def crawlwrap(self, oneshot=False, register_time=None):
         rconf.volume_id = self.uuid
         if self.volinfo:
             if self.volinfo['retval']:
-                logging.warn(lf("primary cluster's info may not be valid",
-                                error=self.volinfo['retval']))
+                logging.warning(lf("primary cluster's info may not be valid",
+                                   error=self.volinfo['retval']))
         else:
             raise GsyncdError("primary volinfo unavailable")
         self.lastreport['time'] = time.time()
@@ -1169,9 +1169,9 @@ def process_change(self, change, done, retry):
                     entries.append(
                         edct(ty, stat=st, entry=en, gfid=gfid, link=rl))
                 else:
-                    logging.warn(lf('ignoring op',
-                                    gfid=gfid,
-                                    type=ty))
+                    logging.warning(lf('ignoring op',
+                                       gfid=gfid,
+                                       type=ty))
             elif et == self.TYPE_GFID:
                 # If self.unlinked_gfids is available, then that means it is
                 # retrying the changelog second time. Do not add the GFID's
@@ -1202,8 +1202,8 @@ def process_change(self, change, done, retry):
                     (gconf.get("sync-xattrs") or gconf.get("sync-acls")):
                 datas.add(os.path.join(pfx, ec[0]))
             else:
-                logging.warn(lf('got invalid fop type',
-                                type=et))
+                logging.warning(lf('got invalid fop type',
+                                   type=et))
         logging.debug('entries: %s' % repr(entries))
 
         # Increment counters for Status
@@ -1407,8 +1407,8 @@ def process(self, changes, done=1):
             # entry_ops() that failed... so we retry the _whole_ changelog
             # again.
             # TODO: remove entry retries when it's gets fixed.
-            logging.warn(lf('incomplete sync, retrying changelogs',
-                            files=list(map(os.path.basename, changes))))
+            logging.warning(lf('incomplete sync, retrying changelogs',
+                               files=list(map(os.path.basename, changes))))
 
             # Reset the Data counter before Retry
             self.status.dec_value("data", self.files_in_batch)
@@ -1708,8 +1708,8 @@ def Xsyncer():
                                      time=item[1]))
                     self.upd_stime(item[1][1], item[1][0])
                 else:
-                    logging.warn(lf('unknown tuple in comlist',
-                                    entry=item))
+                    logging.warning(lf('unknown tuple in comlist',
+                                       entry=item))
             except IndexError:
                 time.sleep(1)
 
@@ -1787,20 +1787,20 @@ def Xcrawl(self, path='.', xtr_root=None):
             xtr_root = self.xtime('.', self.secondary)
             if isinstance(xtr_root, int):
                 if xtr_root != ENOENT:
-                    logging.warn(lf("secondary cluster not returning the "
-                                    "xtime for root",
-                                    error=xtr_root))
+                    logging.warning(lf("secondary cluster not returning the "
+                                       "xtime for root",
+                                       error=xtr_root))
                 xtr_root = self.minus_infinity
         xtl = self.xtime(path)
         if isinstance(xtl, int):
-            logging.warn("primary cluster's xtime not found")
+            logging.warning("primary cluster's xtime not found")
         xtr = self.xtime(path, self.secondary)
         if isinstance(xtr, int):
             if xtr != ENOENT:
-                logging.warn(lf("secondary cluster not returning the "
-                                "xtime for dir",
-                                path=path,
-                                error=xtr))
+                logging.warning(lf("secondary cluster not returning the "
+                                   "xtime for dir",
+                                   path=path,
+                                   error=xtr))
             xtr = self.minus_infinity
         xtr = max(xtr, xtr_root)
         zero_zero = (0, 0)
@@ -1815,32 +1815,32 @@ def Xcrawl(self, path='.', xtr_root=None):
         dem = self.primary.server.entries(path)
         pargfid = self.primary.server.gfid(path)
         if isinstance(pargfid, int):
-            logging.warn(lf('skipping directory',
-                            path=path))
+            logging.warning(lf('skipping directory',
+                               path=path))
         for e in dem:
             bname = e
             e = os.path.join(path, e)
             xte = self.xtime(e)
             if isinstance(xte, int):
-                logging.warn(lf("irregular xtime",
-                                path=e,
-                                error=errno.errorcode[xte]))
+                logging.warning(lf("irregular xtime",
+                                   path=e,
+                                   error=errno.errorcode[xte]))
                 continue
             if not self.need_sync(e, xte, xtr):
                 continue
             st = self.primary.server.lstat(e)
             if isinstance(st, int):
-                logging.warn(lf('got purged in the interim',
-                                path=e))
+                logging.warning(lf('got purged in the interim',
+                                   path=e))
                 continue
             if self.is_sticky(e, st.st_mode):
                 logging.debug(lf('ignoring sticky bit file',
                                  path=e))
                 continue
             gfid = self.primary.server.gfid(e)
             if isinstance(gfid, int):
-                logging.warn(lf('skipping entry',
-                                path=e))
+                logging.warning(lf('skipping entry',
+                                   path=e))
                 continue
             mo = st.st_mode
             self.counter += 1 if ((stat.S_ISDIR(mo) or
24 changes: 12 additions & 12 deletions geo-replication/syncdaemon/resource.py
@@ -543,17 +543,17 @@ def rename_with_disk_gfid_confirmation(gfid, entry, en, uid, gid):
logging.debug("Removed %s => %s/%s recursively" %
(gfid, pg, bname))
else:
logging.warn(lf("Recursive remove failed",
gfid=gfid,
pgfid=pg,
bname=bname,
error=os.strerror(er1)))
logging.warning(lf("Recursive remove failed",
gfid=gfid,
pgfid=pg,
bname=bname,
error=os.strerror(er1)))
else:
logging.warn(lf("Failed to remove",
gfid=gfid,
pgfid=pg,
bname=bname,
error=os.strerror(er)))
logging.warning(lf("Failed to remove",
gfid=gfid,
pgfid=pg,
bname=bname,
error=os.strerror(er)))
elif op in ['CREATE', 'MKNOD']:
slink = os.path.join(pfx, gfid)
st = lstat(slink)
@@ -902,8 +902,8 @@ def inhibit(self, label):
                 if rv:
                     rv = (os.WIFEXITED(rv) and os.WEXITSTATUS(rv) or 0) - \
                          (os.WIFSIGNALED(rv) and os.WTERMSIG(rv) or 0)
-                    logging.warn(lf('stale mount possibly left behind',
-                                    path=d))
+                    logging.warning(lf('stale mount possibly left behind',
+                                       path=d))
                     raise GsyncdError("cleaning up temp mountpoint %s "
                                       "failed with status %d" %
                                       (d, rv))
12 changes: 6 additions & 6 deletions geo-replication/syncdaemon/syncdutils.py
@@ -463,9 +463,9 @@ def boolify(s):
     if lstr in true_list:
         rv = True
     elif lstr not in false_list:
-        logging.warn(lf("Unknown string in \"string to boolean\" conversion, "
-                        "defaulting to False",
-                        str=s))
+        logging.warning(lf("Unknown string in \"string to boolean\" conversion, "
+                           "defaulting to False",
+                           str=s))
 
     return rv
 
@@ -578,9 +578,9 @@ def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]):
             nr_tries += 1
             if nr_tries == GF_OP_RETRIES:
                 # probably a screwed state, cannot do much...
-                logging.warn(lf('reached maximum retries',
-                                args=repr(arg),
-                                error=ex))
+                logging.warning(lf('reached maximum retries',
+                                   args=repr(arg),
+                                   error=ex))
                 raise
         time.sleep(0.250)  # retry the call
 
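A side note for readers outside the geo-replication tree: every rewritten call site wraps its message in lf(). The helper lives in geo-replication/syncdaemon/syncdutils.py; a minimal sketch of its behavior, reconstructed from the call sites above rather than copied from this commit:

    def lf(event, **kwargs):
        # Log-format helper (sketch): renders an event string plus keyword
        # details as a structured message, so that
        #   lf('skipping entry', path=e)
        # becomes something like "skipping entry [{path=...}]".
        msg = event
        for k, v in kwargs.items():
            msg += " [{%s=%s}]" % (k, v)
        return msg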
