I had a spelling mistake in my Dockerfile's COPY command, and when I corrected it, it worked.
An update to @saptarshi-basu's answer:
Buffer.slice is now deprecated and has been replaced with Buffer.subarray.
The salt can also be added to the encrypted buffer to help in the decryption process later.
So a JavaScript implementation becomes:
//--------------------------------------------------
// INCLUDES
//--------------------------------------------------
const crypto = require('crypto');
const { Buffer } = require("buffer");
//--------------------------------------------------
//-----------------------------------------
// module exports
//-----------------------------------------
module.exports = { encryptVal,
decryptVal
};
//-----------------------------------------
//--------------------------------------------------
// CONSTANTS
//
//--------------------------------------------------
const ENCRYPTION_CONSTANTS = {
ALGORITHM: 'aes-256-gcm', //--the algorithm used for encryption
KEY_BYTE_LENGTH: 32, //--the length of the key used for encryption
IV_BYTE_LENGTH: 16, //--the length of the initialization vector
SALT_BYTE_LENGTH: 16, //--the length of the salt used for encryption
AUTH_TAG_BYTE_LENGTH: 16, //--the length of the authentication tag
INPUT_ENCODING: 'utf-8', //--the encoding of the input data
OUTPUT_ENCODING: 'base64url', //--the encoding of the output data in base64url (url/cookies friendly)
//OUTPUT_ENCODING: 'base64', //--the encoding of the output data in base64
//OUTPUT_ENCODING: 'hex', //--the encoding of the output data in hex
}
//--------------------------------------------------
//--------------------------------------------------
/**
* This function is used to generate a random key
* for the encryption process.
*
* @returns {Buffer} the generated random key
*/
async function getRandomKey() {
return crypto.randomBytes(ENCRYPTION_CONSTANTS.KEY_BYTE_LENGTH);
}
/**
* This function is used to generate a key based
* on the given password and salt for the
* encryption process.
*
* @returns {Buffer} the derived key
*/
async function getKeyFromPassword(gPassword, gSalt){
return crypto.scryptSync(gPassword, gSalt, ENCRYPTION_CONSTANTS.KEY_BYTE_LENGTH);
}
/**
* This function is used to generate a random salt
* for the encryption process.
*
* @returns {Buffer} the generated random salt
*/
async function getSalt(){
return crypto.randomBytes(ENCRYPTION_CONSTANTS.SALT_BYTE_LENGTH);
}
/**
* This function is used to generate a random
* initialization vector for the encryption process.
*
* @returns {Buffer} the generated random initialization vector
*/
async function getInitializationVector(){
return crypto.randomBytes(ENCRYPTION_CONSTANTS.IV_BYTE_LENGTH);
}
/**
* This function is used to encrypt a given value using
* the given password.
*
* @param {string} gVal the value to be encrypted
* @param {string} gPassword the password to be used for encryption
*
* @returns {string} the encrypted value, in OUTPUT_ENCODING
*/
async function encryptVal(gVal, gPassword){
try{
const algorithm = ENCRYPTION_CONSTANTS.ALGORITHM;
const iv = await getInitializationVector();
const salt = await getSalt();
const key = await getKeyFromPassword(gPassword, salt);
const cipher = crypto.createCipheriv(algorithm, key, iv, {
authTagLength: ENCRYPTION_CONSTANTS.AUTH_TAG_BYTE_LENGTH
});
const encryptedResults = Buffer.concat([cipher.update(gVal, ENCRYPTION_CONSTANTS.INPUT_ENCODING), cipher.final()]);
return Buffer.concat([iv, salt, encryptedResults, cipher.getAuthTag()])
.toString(ENCRYPTION_CONSTANTS.OUTPUT_ENCODING);
}catch(err){
//--log error to the system
const errMsg = '--->>ERROR: `encryptVal()` error: '+err;
console.log(errMsg);
}
}
/**
* This function is used to decrypt a given encrypted
* value using the given password.
*
* @param {string} gEncryptedVal the encrypted value to be decrypted
* @param {string} gPassword the password to be used for decryption
*
* @returns {string} the decrypted value
*/
async function decryptVal(gEncryptedVal, gPassword){
try{
const algorithm = ENCRYPTION_CONSTANTS.ALGORITHM;
const encryptedBuffer = Buffer.from(gEncryptedVal, ENCRYPTION_CONSTANTS.OUTPUT_ENCODING);
const iv = encryptedBuffer.subarray(0, ENCRYPTION_CONSTANTS.IV_BYTE_LENGTH);
const salt = encryptedBuffer.subarray(ENCRYPTION_CONSTANTS.IV_BYTE_LENGTH, ENCRYPTION_CONSTANTS.IV_BYTE_LENGTH + ENCRYPTION_CONSTANTS.SALT_BYTE_LENGTH);
const encryptedData = encryptedBuffer.subarray((ENCRYPTION_CONSTANTS.IV_BYTE_LENGTH + ENCRYPTION_CONSTANTS.SALT_BYTE_LENGTH), -ENCRYPTION_CONSTANTS.AUTH_TAG_BYTE_LENGTH);
const authTag = encryptedBuffer.subarray(-ENCRYPTION_CONSTANTS.AUTH_TAG_BYTE_LENGTH);
const key = await getKeyFromPassword(gPassword, salt);
const decipher = crypto.createDecipheriv(algorithm, key, iv, {
authTagLength: ENCRYPTION_CONSTANTS.AUTH_TAG_BYTE_LENGTH
});
decipher.setAuthTag(authTag);
return Buffer.concat([decipher.update(encryptedData), decipher.final()])
.toString(ENCRYPTION_CONSTANTS.INPUT_ENCODING);
}catch(err){
//--log error to the system
const errMsg = '--->>ERROR: `decryptVal()` error: '+err;
console.log(errMsg);
}
}
You can try the functions above as:
//--test encrypt and decrypt helper functions
async function testEncryptDecrypt(){
const txt = 'Hello World';
const password = "opoo";
const encryptedTxt = await encryptVal(txt, password);
const decryptedTxt = await decryptVal(encryptedTxt, password);
console.log('-->>OUTPUT: the encrypted text is: '+encryptedTxt);
console.log('-->>OUTPUT: the decrypted text is: '+decryptedTxt);
}
testEncryptDecrypt();
Unfortunately, LBank doesn't provide any API help for PHP, but you can check this link: https://git.fabi.me/flap/ccxt/-/blob/d34a0651b209ac77453f05c4ce31883f0cd2d6b8/php/lbank.php
GeoJSON is always lon/lat as defined by the standard.
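For example, a Point for the Eiffel Tower (2.2945°E, 48.8584°N) puts longitude first:

const point = {
  type: "Point",
  coordinates: [2.2945, 48.8584] // [longitude, latitude], per RFC 7946
};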
I get this message and don't know how to fix it:
2024-11-24 16:21:15.336 8833 8833 com.conena.logcat.reader D View : [ANR Warning]onLayout time too long, this =DecorView@265b7e0[MainActivity]time =696 ms
git show --all -- path/to/file
Finally fixed it. I think it was coming from a misconfiguration of WebStorm. I followed the docs on JetBrains' website. In particular, I took the time to properly configure the TypeScript Language Service and the run configuration; I use ts-node on the produced JS code.
There seems to be no need to use npx tsc --init; to generate the package.json and tsconfig.json, one can simply create the files from the Project window.
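For reference, a minimal hand-written tsconfig.json can be as small as this (the compiler options here are only an example, not what WebStorm generates):

{
  "compilerOptions": {
    "target": "ES2020",
    "module": "commonjs",
    "outDir": "dist",
    "strict": true
  },
  "include": ["src"]
}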
Materialize and Feldera support mutual recursion.
I made an app to do the same for Meta Quest 3; check it out:
https://zoynctech.itch.io/room-scan-exporter-for-meta-quest-3
Hey, I configured source_table_name as '*', but when syncing to the target database it says:
[master-engine] - PushService - Push data sent to destination:002:002
[client-engine] - ManageIncomingBatchListener - The incoming batch 001-138 failed: Could not find the target table '*'
[master-engine] - AcknowledgeService - The outgoing batch 002-138 failed: Could not find the target table '*'
How are you syncing those?
On the frontend, you will need to specify authMode.
import { generateClient } from "aws-amplify/api";
import type { Schema } from "~/amplify/data/resource";
const client = generateClient<Schema>();
const response = await client.queries.sendEmail({
name: "Amplify",
},{
authMode: "userPool"
});
Doesn't work. How is uart_rx_intr_handler going to be called?
I don't exactly get what you're trying to achieve, but an array in JS can be filtered like this:
const arr = [123, 345, 567, 789];
const search = '3';
arr.filter(x => x.toString().includes(search));
// Result: [123, 345]
Please let me know if this helps, or provide more information.
I had the same class in the /lib folder; I deleted it and everything worked.
Tried some things and found a couple of ways to approximate it (the multiplication signs below were eaten by the formatting, restored here):
wxdraw(terminal='png, file_name="par", gr3d(axis_3d=false, border=false, line_width=6, color=gray82, view=[111,0], implicit(1.04*x^2+1.2*y^2+9921*z^2-1, x,-1,1, y,-1,1, z,-1,1), line_width=.3, color=black, makelist(parametric(sin(t)*cos(%pi*p/8), sin(t)*sin(%pi*p/8), cos(t), t, 0, %pi+.1), p, 0, 8), makelist(parametric(sin(t*%pi/16)*cos(p), sin(t*%pi/16)*sin(p), cos(t*%pi/16), p, 0, %pi), t, 0, 13), proportional_axes='xyz))
and
wxdraw(terminal='png, file_name="multi", gr3d(axis_3d=false, border=false, line_width=15, color=gray80, view=[66,0], implicit(1.2*x^2+2*y^2+9921*z^2-1, x,-1,1, y,-1,1, z,-1,1), line_width=.5, color=black, xu_grid=12, yv_grid=16, spherical(1, a, %pi-.1, 2*%pi+.21, z, 0, %pi), proportional_axes='xyz));
If you really wanted to, you could probably find a way to make dotted lines on the rear end, or do that in TeX, along with a better way to make the axis/dimension text notation, etc., than wxMaxima's wxdraw provides.
Using ESLint 9 and its flat configuration, env is no longer valid.
You have to add ...globals.jasmine to the globals object of the languageOptions (in your eslint.config.mjs configuration file):
import globals from 'globals';

export default [
  {
    languageOptions: {
      //...
      globals: {
        //...
        ...globals.jasmine
      }
    }
  }
];
I fixed the issue by updating CMake in SDK Tools in Android Studio, deleting the old build, and re-building the app. Also, please make sure you have the latest Visual C++ Redistributable installed.
Just had the same problem: check the binding with
netsh http show sslcert
then correct the entries if needed:
netsh http add sslcert hostnameport=<servername>:443 certhash=<certhash> appid={yourappid} certstorename=MY
It's 2024. I hope this helps someone. I stumbled upon a solution thanks to posts here.
When using filter, you are then able to change the color of the "x".
input[type="search"]::-webkit-search-cancel-button {
filter: invert(100%);
color: yellow;
}
The approach worked really well:
git clone
git rm --cached <filename>
git commit -m "Your commit message"
git push ## pushes the change to the remote repository
It seems like when saving Influencer it is trying to create a new record - and if Influencer is derived from User - then you are missing email, username, etc.
I think what would work is after committing the User record - re-read it back as the appropriate type (Influencer/Sponsor) - then fill in additional params.
I am not sure why you can't just create an Influencer/Sponsor record in full at registration time rather than the two-step process... I must be missing something.
SetClipboardViewer is an older protocol dating back to Windows 95. The newer approach is AddClipboardFormatListener.
Here is a writeup showing how to use the AddClipboardFormatListener in C#:
Monitor for clipboard changes using AddClipboardFormatListener
Best of luck.
My suggestion would be to make sure you have turned on LSP in the settings of VS Code. Simply go to Settings and search for "LSP"; you will find a checkbox for "auto-completion".
I think some things have changed, since the answers for Mongo 6 did not work for me using Mongo 8. So I made slight tweaks (here's a GitHub repository).
openssl rand -base64 756 > ./init/mongo/mongo-keyfile
chmod 400 ./init/mongo/mongo-keyfile
name: mongo-stack
services:
mongo:
image: mongo:8.0
restart: unless-stopped
command: ["--replSet", "rs0", "--keyFile", "/etc/mongo-keyfile"]
container_name: starter-mongo
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: example
volumes:
- mongo-vol:/data/db
- ./init/mongo/mongo-keyfile:/etc/mongo-keyfile:ro
networks:
- mongo-net
ports:
- 27017:27017
healthcheck:
test: mongosh --host localhost:27017 --eval 'db.adminCommand("ping")' || exit 1
interval: 5s
timeout: 30s
start_period: 0s
start_interval: 1s
retries: 30
mongo-init-replica:
image: mongo:8.0
container_name: mongo-init-replica
depends_on:
- mongo
volumes:
- ./init/mongo/init-replica.sh:/docker-entrypoint-initdb.d/init-replica.sh:ro
entrypoint: ["/docker-entrypoint-initdb.d/init-replica.sh"]
networks:
- mongo-net
volumes:
mongo-vol:
driver: local
networks:
mongo-net:
name: starter-mongo-net
driver: bridge
./init/mongo/init-replica.sh:
#!/bin/bash
echo ====================================================
echo ============= Initializing Replica Set =============
echo ====================================================
# Loop until MongoDB is ready to accept connections
until mongosh --host mongo:27017 --eval 'quit(0)' &>/dev/null; do
echo "Waiting for mongod to start..."
sleep 5
done
echo "MongoDB started. Initiating Replica Set..."
# Connect to the MongoDB service and initiate the replica set
mongosh --host mongo:27017 -u root -p example --authenticationDatabase admin <<EOF
rs.initiate({
_id: "rs0",
members: [
{ _id: 0, host: "localhost:27017" }
]
})
EOF
echo ====================================================
echo ============= Replica Set initialized ==============
echo ====================================================
Please note that I tried to run the entrypoint file directly from the mongo service, but it simply refused to initiate the replica set for whatever reason. It had to be done from a separate container.
I just found that the cause of the error is that I misconfigured the router_id. It was the default -> source to destination.
And now I have this error:
Error Message = ORA-00942: table or view does not exist. It should create the table on the destination, right?
It is simply because during testing you used your own website, so it is auto-generated data, or you have to contact SQL for the data processing... it shows the active traffic time from all users. They are not advanced enough to show the user data as per their requirements.
I have a deer camera, but it keeps going back to the same thing. How do I get the pictures off it? I never could figure it out. Can you please help me get the pictures off this stopped camera?
This fixed it for me! Thanks @SaifGo.
Run cmd as admin:
wsl --update
netsh winsock reset
To use z-index to put the box-shadow over the box, set the z-index of the box to 0, and the z-index of the box-shadow to 1, or any value greater than zero, like this:
.box {
z-index:0;
}
.box-shadow {
z-index:1;
}
Thanks for the information above. I have tried a new way to calculate it, BUT on a real Pixel 6 device, not an emulator:
val xdpi = displayMetrics.xdpi // -> 409.432
val ydpi = displayMetrics.ydpi // -> 411.891
Both are values around 411.
If I calculate diagonalInches using this value instead of the density, I get 6.4 inches.
What worked for me was a simple restart, as suggested by the message.
Line in particular:
"The system may need to be restarted so the changes can take effect"
So, I restarted. 10 or so seconds after sign-in, a pop-up window appears.
But in my case, this was a dud! So I proceeded to install a distribution the normal way, using:
wsl --list --online to list the available distros.
wsl --install -d <DistroName> where <DistroName> represents an option from the list.
In my specific case, I wanted to install Ubuntu, which apparently was already installed, but it re-installs and launches anyway.
std::array is probably the best fit for a modern fixed size contiguous memory container.
The solution was to uninstall Opera Mobile... learned my lesson after troubleshooting for 45 minutes.
Disappointing that people can't answer the actual question raised by the OP but provide alternative mechanisms instead.
While these may well work, they don't help people understand the underlying cause.
This particular issue appears to be that the RetentionPolicyTagLinks parameter takes a specific object type, so it cannot handle a hash table as input; but perhaps someone can suggest methods to cast to different object types with PowerShell?
from pyspark.sql.functions import col, lit, to_date, when

def handle_default_values(df):
    for column, dtype in df.dtypes:
        if dtype == 'int':
            df = df.withColumn(column, when(col(column).isNull(), lit(-1)).otherwise(col(column)))
        elif (dtype == 'float') or (dtype == 'double') or (dtype == 'decimal(18,2)'):
            df = df.withColumn(column, when(col(column).isNull(), lit(0.0)).otherwise(col(column)))
        elif dtype == 'string':
            df = df.withColumn(column, when(col(column).isNull(), lit('UNK')).otherwise(col(column)))
        elif dtype == 'timestamp':
            df = df.withColumn(column, when(col(column).isNull(), to_date(lit('1900-01-01'))).otherwise(col(column)))
    return df
The new YT API requires login, and this breaks all public sharing and embeds.
Now YT videos are a walled garden you can only see if you are logged in.
So no privacy is allowed if you want to watch a YT video.
If you live in a country with censorship, say goodbye to YT and freedom of thought.
The combination of the two FK keys is a so-called "composite primary key" and as such can be a primary key. A composite key in an SQL table is a primary key that consists of two or more columns combined to uniquely identify a record in the table. This is used when a single column cannot uniquely identify a row, but a combination of columns can.
Add this in your TS code:
navigator.clipboard.writeText('pippo');
This is the JavaScript way; docs can be found here: https://www.w3schools.com/howto/tryit.asp?filename=tryhow_js_copy_clipboard
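Note that writeText returns a Promise, so you may want to await it and handle failures; a minimal sketch (the wrapper name is mine):

async function copyToClipboard(text) {
  try {
    // requires a secure context (HTTPS) and usually a focused page
    await navigator.clipboard.writeText(text);
    console.log('copied:', text);
  } catch (err) {
    console.error('clipboard write failed:', err);
  }
}
copyToClipboard('pippo');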
I was able to get it to work. I used =SUMIFS(D2:D203, A2:A203, "Not Cooked", C2:C203, G2:G203). I'm not sure why switching the order of the criteria changed the output to one that worked; perhaps by starting with the criterion which applies to every row, it allows the second criterion to be one which matches fewer rows, since there are fewer unique ingredients than there are ingredient entries next to the meals.
Thanks for the response. From what I can tell, there is no VS integration. I have been able to sign manually with SignTool.exe.
If you don't need any returned value, the smallest HTTP response is:
HTTP/1.1 204 No Content\r\n\r\n
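As an illustration, a minimal Node.js server that answers every request with exactly that (the port is just an example):

const http = require('http');

http.createServer((req, res) => {
  res.writeHead(204); // 204 No Content: status line and headers only, no body
  res.end();
}).listen(8080);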
What about using the fourth dimension?
*if I have wrongly assumed that some parts don't need explaining, please do give feedback!
fileName = "myDatabase.h5";
datasetName = "/myDataset";
myMat1 = zeros(12,24,12,1);
myMat1ID = 1; % so that you can call the matrix of interest
myMat2 = ones(12,24,12,1);
myMat2ID = 2;
myRandomMatID = 99;
h5create(fileName,datasetName,[12 24 12 Inf], ChunkSize=[12 24 12 20]);
% ChunkSize must be given for Inf axes. I picked 20 for a 2MB chunk..
h5write(fileName,datasetName,myMat1,[1 1 1 myMat1ID],[12 24 12 1]);
h5write(fileName,datasetName,myMat2,[1 1 1 myMat2ID],[12 24 12 1]);
% We write a random matrix for demonstration
h5write(fileName,datasetName,rand(12,24,12,1),[1 1 1 myRandomMatID],[12 24 12 1]);
% Matrix 1 size and a 3x3 sample:
mat1=h5read(fileName,datasetName,[1 1 1 myMat1ID],[12 24 12 1]);
disp(size(mat1));
disp(mat1(1:3,1:3,4));
% The random matrix size and a 3x3 sample:
mat3=h5read(fileName,datasetName,[1 1 1 myRandomMatID],[12 24 12 1]);
disp(size(mat3));
disp((mat3(1:3,1:3,8)));
Output:
12 24 12
0 0 0
0 0 0
0 0 0
12 24 12
0.0021 0.8974 0.2568
0.5009 0.4895 0.9892
0.8742 0.2310 0.8078
Yes, I was, but it was a cumbersome effort until every piece fell into place: https://akobor.me/posts/the-curious-incident-of-google-cast-in-jetpack-compose
None of the above answers worked for me, using VS Code 1.95.3, C# extension v2.55.29, and dotnet 9.0.100.
The C# extension (vscode-csharp / ms-dotnettools.csharp) no longer uses OmniSharp by default; it uses Roslyn now. Also, if OmniSharp finds a .editorconfig file, it will ignore omnisharp.json. The C# Dev Kit extension (vscode-dotnettools) must be uninstalled, and vscode-csharp must be configured to enable OmniSharp and ignore .editorconfig:
"dotnet.server.useOmnisharp": true,
"omnisharp.useEditorFormattingSettings": false
Then @thornebrandt's omnisharp.json should work.
Source: What changed about the C# extension for VS Code's IntelliSense in the v2.0.320 release?
When issues arise like this, and it's not clear what the problem is or where time is being spent, the best way to understand the root cause is profiling. Profiling is the process of analyzing your application's performance to identify bottlenecks and optimize its behavior. For PHP, there are several profiling tools available. I recommend using one of these: Xdebug (https://xdebug.org/) , Blackfire (https://www.blackfire.io/), or Tideways (https://tideways.com/). These tools can help you get a clear picture of what's happening in your application.
I suggest you install Xdebug, create a profile, and examine it.
How to install Xdebug: the documentation is here: https://xdebug.org/docs/install
Docs about profiling: https://xdebug.org/docs/profiler
Hope it helps. Cheers!
You must go to the file manager of the online Python interpreter. Then create a new text file, enter the details in the file editor, and save the file. Once you are done saving the txt file in the file manager, your code should work using the online Python interpreter.
RBAC is the correct way to limit access to data to authorised users. Perhaps also combined with Masking policies and Row access policies. But this is not the same as having Data Exfiltration controls. It can be entirely legitimate to access data for query and analysis but not export it out of Snowflake or download it locally.
This can be accomplished by disabling external stages and several other account parameters https://www.snowflake.com/en/blog/how-to-configure-a-snowflake-account-to-prevent-data-exfiltration/.
Restricting downloads locally is more tricky as this is entirely controlled by the client i.e. user browser, Power BI Desktop, etc. This aspect of Data Exfil control is therefore client-dependent.
You can now restrict access to the Snowsight UI with Authentication policies https://docs.snowflake.com/en/user-guide/authentication-policies and then you can ask customer support to disable the "Download results" button in the browser. This is an internal setting that only SF can enable/disable (this isn't bulletproof as the user could still enable the setting again locally with javascript - assuming they know what they're doing) but it's at least something.
By combining these two options you can restrict access to the Snowsight UI and disable downloads locally. However, there is currently no setting to disable copy/paste from the results window. You could try to mitigate this with max rows returned https://docs.snowflake.com/en/sql-reference/parameters#rows-per-resultset but this can still be overridden at the session level by the user. Lastly, it's worth mentioning that you can monitor data egress with the following views:
The query history view will show GET requests for data that is downloaded locally from an internal stage e.g. via the Snowsql cli but there is no ability to log downloads from the UI (if you have the button enabled).
And of course, none of these measures would prevent a user from using a camera and OCR library to extract data from the Snowsight UI. So, at best these measures would only help to protect against "inadvertent" Data Exfil and perhaps slow down/restrict the actions of a bad actor.
I am nowhere near a developer, but I am trying to find a way to search with multiple choices that lead me to what I want to find. Not too many, just simple ones, that are faster... kind of like 20 Questions: find it in 20 or fewer questions.
After racking my brains, my solution was to add this to the top of the problematic page: export const dynamic = 'force-dynamic'
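For context, in a Next.js App Router project that line goes at module scope of the page file; a minimal sketch (the file path is illustrative):

// app/problematic/page.tsx
export const dynamic = 'force-dynamic'; // opt this route out of static rendering

export default function Page() {
  return <main>content rendered per-request</main>;
}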
I'm guessing you sorted this or got on with your life, as it was a while ago! CardDAV is definitely a little idiosyncratic! I haven't found that not implementing me-card has ever caused iOS Contacts to crash, though.
One frustrating thing about iOS and the Contacts app worth being aware of is that it syncs when it wants to, and pulling down to refresh does nothing.
If you're still having issues implementing this I'd suggest using Charles proxy with SSL enabled to snoop on the comms. You can connect your phone to it easily enough.
To debug, you could sign up for a trial at Contactzilla (this is my product, heads up) and there you can snoop on the process. If you get stuck, feel free to drop me a line.
Wow, fixed 5 minutes later. It's something to do with MongoDB permissions. Since we're in development it's not a big deal, so I just allowed all IPs and it worked. Odd because I allowed my current IP and it didn't work, but that's a temporary fix.
I heard back from the developer of the API. Somehow my IP address had gotten blacklisted. He removed that and everything works. So, nothing wrong with the code.
Could you solve the issue? I have a similar issue. I am connecting to the same server. First I connect with CertA.pfx and all works fine. But when I try to connect a second time to the same server using CertB.pfx, the connection doesn't work, because the JVM is using CertA.pfx.
If I restart the JVM and connect first using CertB.pfx it works fine, but then when I try to connect using CertA.pfx the problem is the same.
Can you elaborate on your solution? As an administrator, I always see all roles on every contact in the subgrid and not the associated contact role.
All signs point to the possibility of mixed bitness within the same binary on PowerPC. Some CUs in an otherwise 64-bit binary use 32-bit addresses, it looks like.
I had a slightly different variation of this error; in my case it was an over-motivated TS autocomplete adding a random (and apparently broken) import to some file (import { scan } from "rxjs").
If the fix above doesn't apply, I recommend going through files and looking for suspicious and unused imports.
objTextRng.Font.Color.RGB = System.Drawing.ColorTranslator.ToWin32(System.Drawing.Color.Black);
Node.js uses OpenSSL under the hood, and the code for CTR mode can be found in the ctr128.c implementation. An equivalent function in Node.js might look like this:
function ctr128Inc(counter) {
let c = 1;
let n = 16;
do {
n -= 1;
c += counter[n];
counter[n] = c & 0xFF;
c = c >> 8;
} while (n);
}
This function increments the counter by one block. To increment by multiple blocks, you might wrap it as follows:
function incrementIVOpenSSL(iv, increment) {
for (let i = 0; i < increment; i++)
ctr128Inc(iv)
}
However, this method is inefficient for large increments due to its linear time complexity and is practically unusable in real-world applications.
BigInt: Node.js introduces the BigInt type, which can handle arbitrarily large integers efficiently. We can utilize it to increment the IV by converting the IV buffer to a BigInt, performing the increment, and converting it back to a Buffer:
const IV_MAX = 0xffffffffffffffffffffffffffffffffn;
const IV_OVERFLOW_MODULO = IV_MAX + 1n;
function incrementIvByFullBlocks(originalIv: Buffer, fullBlocksToIncrement: bigint): Buffer {
let ivBigInt = bufferToBigInt(originalIv);
ivBigInt += fullBlocksToIncrement;
if (ivBigInt > IV_MAX)
ivBigInt %= IV_OVERFLOW_MODULO;
return bigIntToBuffer(ivBigInt);
}
function bufferToBigInt(buffer: Buffer): bigint {
const hexedBuffer = buffer.toString(`hex`);
return BigInt(`0x${hexedBuffer}`);
}
function bigIntToBuffer(bigInt: bigint): Buffer {
const hexedBigInt = bigInt.toString(16).padStart(32, `0`);
return Buffer.from(hexedBigInt, `hex`);
}
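For example (the all-0xFF IV is chosen to show the wrap-around case):

const iv = Buffer.alloc(16, 0xff);            // 16 bytes of 0xFF, i.e. IV_MAX
const next = incrementIvByFullBlocks(iv, 1n); // wraps around to all zeroes
console.log(next.toString('hex'));            // 00000000000000000000000000000000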
Only, this method isn't as fast as the one proposed by @youen. On my PC, for 100k iterations, @youen's method finishes in 15ms and the BigInt version in 90ms. It is not a big difference, though, and the BigInt version is by far more obvious to a reader.
Another implementation can be found in the crypto-aes-ctr library.
It performs the increment operation more quickly (~7ms for 100,000 iterations) but sacrifices readability. It also supports more edge cases, mostly connected with incrementing the IV by very big numbers; something that probably won't be the case in real-life scenarios for a very long time (until we switch to petabyte drives).
For a detailed comparison refer to my GitHub gist. The BigInt method and the OpenSSL-inspired function are the only ones passing all edge case tests, with the BigInt approach offering a good balance between performance and readability.
aes-ctr-concurrent: To simplify the process and enhance performance in concurrent environments, I've developed the aes-ctr-concurrent library, available on NPM. This library builds on the native crypto module.
I know this question is old, but to others that may experience this issue in the future, especially with WAMPServer: I have a video on YouTube (https://youtu.be/Jpp_Z5fHB4g) where I demonstrate how I solved the issue for myself on WAMPServer 3.3.0.
I was receiving this error on attempt to push changes to main branch of a repository that I was working on alone.
The reason was that the company policy had changed, and it was no longer possible to push directly to the main branch.
So I created a new branch, then a PR, and then merged the PR. This was the solution.
Typically session_start() is put at the very top of the code, outside of the HTML tags and within its own PHP tags. It looks like yours is within the HTML, and that might be causing the problem.
The behavior of comparisons between signed and unsigned types involves the type promotion and conversion rules in C. When a signed value is compared with an unsigned value, the signed value is converted to unsigned, which can lead to unexpected results. The %d format specifier interprets the passed bits as a signed int, while %u presents them as a non-negative integer.
If you meant to say that the value of d is 4294967295, that would be correct. The output of printf("%d\n", d);, however, reinterprets those bits as signed, which is why it prints -1; use %u to print 4294967295.
I have a similar problem with the .ipynb notebooks and Spyder:
I can create a new notebook, but when I open one (from drive or from my hard disk) it opens like this:
... "cell_type": "code", "execution_count": 5, "id": "a0223996-7fc1-4d91-a975-00ebba92c6f9", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "<class 'list'>\n", "<class 'numpy.ndarray'>\n" ...
I am not familiar with creating/managing conda environments. I tried it in "base". Then I read that it could be because of a conflict with JupyterLab (or something similar), so I tried to create a new environment (without touching anything), installed Spyder, and the problem was there again.
Thanks in advance!
The primary reason is that slices in Go are reference types with internal pointers and capacity metadata, making them inherently mutable and unsuitable as hash-map keys. Allowing slices as keys could lead to unpredictable behavior.
Directly using slices as map keys is not possible. Use a string representation, a custom struct, or fixed-size arrays as alternatives. The best choice depends on your specific use case and constraints.
Well... I feel like an idiot. Read the above comments (thank you Siggemannen and Charlieface). I tried refreshing / restarting SSMS, but it still marks the linked server as an error (red squiggly line).
I just created the stored procedure, ran a test, and it works. Although... even when I go to modify the stored procedure, it still marks the linked server as an error/typo. Not sure if that is something I'm doing wrong, but it seems to work now. My OCD may get the best of me by the end of my project, but I'll power through it somehow.
Thank you again
#include <iostream>
#include <chrono>
#include <thread>
using namespace std::chrono;

// c++ makes me go crazy
// this is basically a stopwatch: when you press enter it will stop the time
// hopefully this can give you kind of a grasp of what you can do with chrono
int main()
{
auto start = std::chrono::high_resolution_clock::now();
std::this_thread::sleep_for(0s);
std::cout << "press enter to stop time \n";
std::cin.get();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> duration = end - start;
std::cout << "your time was " << duration.count() << "s " << std::endl;
return 0;
}
What I have done in the past is put the following code in the css file
.dropbtn a:hover {
color: #f1f1f1;
}
This changes the color when the object is hovered over. If you want it when clicked, I think there is another pseudo-class besides :hover to do that.
The issue you're experiencing likely has to do with how the browser handles scaling and the viewport, especially on smaller screens. Even though you've set the wrappers to a fixed size of 100px by 100px in your CSS, the page's overall scaling and rendering might be influenced by the default viewport meta tag behavior or other responsive rules.
Here’s what you can check and adjust:
While your CSS specifies width: 100px for small screens, the padding and margin around elements can also contribute to unexpected layout results. Verify how these are affecting your layout when scaled.
Modern devices often have high pixel densities, meaning what you perceive as 385px wide might not correspond to the actual CSS pixels being rendered. The browser scales content to match this ratio.
Your media query for max-width: 450px seems correct, but ensure it’s being applied correctly. You might want to add debugging styles to confirm the applied styles in different screen sizes.
If the scaling still seems incorrect after the adjustments, there may be additional external influences, such as parent container styles or browser-specific quirks, that would require further investigation.
def handle_default_values(df):
    for column, dtype in df.dtypes:
        if dtype == 'int':
            df = df.withColumn(column, when(col(column).isNull(), lit(-1)).otherwise(col(column)))
        elif (dtype == 'float') or (dtype == 'double') or (dtype == 'decimal(18,2)'):
            df = df.withColumn(column, when(col(column).isNull(), lit(0.0)).otherwise(col(column)))
        elif dtype == 'string':
            df = df.withColumn(column, when(col(column).isNull(), lit('UNK')).otherwise(col(column)))
        elif dtype == 'timestamp':
            df = df.withColumn(column, when(col(column).isNull(), to_date(lit('1900-01-01'))).otherwise(col(column)))
    return df

order_job_nullh = handle_default_values(order_job_trans)
display(order_job_nullh)
As per documentation and examples, the new way is:
import mbxClient from '@mapbox/mapbox-sdk';
import mbxGeocoding from '@mapbox/mapbox-sdk/services/geocoding';
const mapboxClient = mbxClient({ accessToken: MAPBOX_API });
const geocodingClient = mbxGeocoding(mapboxClient);
geocodingClient.reverseGeocode({
query: [-95.4431142, 33.6875431]
})
.send()
.then(response => {
// GeoJSON document with geocoding matches
const match = response.body;
});
It is simply astounding what poor feedback you got here.
Yes, you can do virtually everything in JavaScript. Ignore the misguided people who did not understand your situation and suggested you use jQuery. Why pull in a large library when a few lines of JavaScript can do the job? Do not bother learning jQuery, which is going out of style.
The key thing you did not understand is that you should create a dialog element within the body of your page, not in the document. Add an id to your body element. In your JavaScript, find it, create a dialog, and use appendChild to put it under body.
From there it's cake. You use document.createElement to make various button and label objects. Use appendChild to add them to your dialog.
Create a Close button and use the dialog.close() method to terminate, as in the sketch below.
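A rough sketch of the whole approach (the body id is made up for illustration):

const body = document.getElementById('myBody'); // the id you added to <body>
const dialog = document.createElement('dialog');
body.appendChild(dialog);

const label = document.createElement('label');
label.textContent = 'Hello from a plain-JS dialog';
dialog.appendChild(label);

const closeBtn = document.createElement('button');
closeBtn.textContent = 'Close';
closeBtn.addEventListener('click', () => dialog.close());
dialog.appendChild(closeBtn);

dialog.showModal(); // or dialog.show() for a non-modal dialog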
Since I could not really find a solution for this, and since my whole setup was obviously way too complicated and over-engineered, I ended up refactoring the project to use the second-docroot approach, which works well (enough). Find more on this in the .ddev/apache/ folder of your project.
Hence this issue is not solved, but it might be closed for now.
This code will overwrite the sheet named Visible with the contents of the sheet named hidden:
Sheets("Hidden").Cells.Copy Destination:=Sheets("Visible").Range("A1")
You can put code in the Workbook_Open() sub to have it run when the workbook is opened.
I've solved the problem myself.
The problem was caused by the wrong -isystem directories. I should include ${TOOLCHAIN_LLVM_DIR}/lib/clang/${LLVM_VERSION_MAJOR}/include instead of /usr/include/linux. The stddef.h located in the former directory defines several basic types such as ptrdiff_t, and both directories contain the standard C header stddef.h.
I've faced the same problem; something was going wrong with the network connection. Maybe there is a firewall blocking your connection. Mine was a CORS extension installed in my Chrome browser; it was active, which prevented automatic saving in Google Colab. So check your network connection and look for anything that blocks it.
If you're on v1, you can use the advanced.generateId option instead in your auth config. You can also return false if you prefer the database to generate the ID.
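A sketch of what that can look like (shape per the better-auth docs; the UUID generator is just an example):

import { betterAuth } from "better-auth";

export const auth = betterAuth({
  // ...your other options
  advanced: {
    // return your own id, or set this to `false` to let the database generate it
    generateId: () => crypto.randomUUID(),
  },
});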
The myStruct variable in your test() function is scoped to that function. When you change it after the await, you are modifying the same local variable, even if it runs on a different thread. Each thread does not have its own separate instance of myStruct; they operate on the same function context, but the value type's nature ensures that if you were to pass it to another context, it would be copied rather than shared.
So, to answer your question directly: the myStruct that you are modifying after the sleep is the same instance (in terms of the function's scope) that was created at the beginning of the function. It does not create a separate instance for the background thread; it simply continues to operate on the local variable in that function.
You need to rebuild the project:
eas build --profile development --platform android
more info here: https://docs.expo.dev/develop/development-builds/create-a-build/
This needs to be added / allowed by adjusting the TCA of the redirects table. However, there are open issues regarding record linkhandlers within the redirects module:
https://forge.typo3.org/issues/105648 https://forge.typo3.org/issues/102892
Thanks for the solution, but be careful: this method doesn't work with Node 21, but works well with Node 20.
You can't assign the result of get_tzdb: auto a = std::chrono::get_tzdb(); will fail, while a bare std::chrono::get_tzdb(); does not.
Temporal uses the Go slog package with two handlers: json and text.
Setting --log-format to json alters the time format in the log so that the timezone offset is displayed. However, there does not seem to be an option to perform automatic timezone conversion; it displays the current system time with additional timezone information.
temporal server start-dev --log-format json
The problem is with Grafana. Have a look at a project named Perses (https://perses.dev); it works fine and has the same charts and basic features.
The following variant of the first code has the same critical path as the original:
void recursive_task(int level)
{
if (level == 0){
usleep(1000);
return;
}
else
{
recursive_task(level-1);
#pragma omp task
{
recursive_task(level-1);
}
#pragma omp task if(0)
{
recursive_task(level-1);
}
#pragma omp taskwait
recursive_task(level-1);
}
}
Due to the taskwait following the two tasks, execution time would not improve even if a thread other than the encountering thread executed the second task. Using if(0) encourages the OpenMP runtime to execute this task immediately rather than possibly scheduling other tasks first. Using if(0) rather than dropping the task construct completely ensures the scoping as described in @JérômeRichard's answer.
You have to set Node.js to listen on that port if you want to use it. My advice is to use the debugging panel in the sidebar.
Here is a workaround for debugging:
Once the first app is running, select the second app you want to run (for me, my API app). Once both are running, you should have multiple projects in the debug toolbar, and it should look like this: both projects running (see the sketch below for starting them together).
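If you'd rather start both apps with one click, a compound configuration in .vscode/launch.json can launch them together (the names and paths below are made up):

{
  "version": "0.2.0",
  "configurations": [
    { "name": "Web", "type": "node", "request": "launch", "program": "${workspaceFolder}/web/index.js" },
    { "name": "API", "type": "node", "request": "launch", "program": "${workspaceFolder}/api/index.js" }
  ],
  "compounds": [
    { "name": "Web + API", "configurations": ["Web", "API"] }
  ]
}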
The APM agent by default only enables SSL; without SSL you get this error. Try with https://127.0.0.1:8200.
I also have this problem when I want to send an email using the SMTP method. This is my code:
MailMessage message = new MailMessage("my-email-address", "target-address");
message.Body = "*****";
message.Subject = "Hello";
var client = new SmtpClient("smtp.gmail.com");
client.Port = 587;
client.Credentials = new NetworkCredential("amin h", "***");
client.EnableSsl = true;
client.Send(message);
Can anyone help me?
Just add:
canvasColor: Colors.green
in your ThemeData widget.
Peace ✌️
It seems you're experiencing an issue with how the drill-down chart's "Back" button behavior interacts with the layout when multiple ECharts instances are present in the same table row. The behavior you're describing suggests that the setOption method on one chart is inadvertently applying options from another chart.
Here’s a breakdown of potential causes and solutions:
Potential causes:
1. Shared state between chart instances: if you are reusing variables like option or initializing multiple charts with overlapping configurations, the instances may interfere with each other.
2. DOM selection issues: using duplicate id attributes or ambiguous DOM queries could lead to the wrong chart being affected by calls like setOption.
3. Improper event binding: if event listeners (myChart.on) are not scoped correctly to specific chart instances, they might interfere when multiple charts are rendered.
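A minimal sketch of keeping the instances separate (the ids and the drill-down helper are placeholders):

// one instance per container element, each with its own option object
const chartA = echarts.init(document.getElementById('chart-row-1'));
const chartB = echarts.init(document.getElementById('chart-row-2'));

const optionA = { /* full option for chart A */ };
const optionB = { /* full option for chart B */ };

chartA.setOption(optionA); // never share or mutate one option object across charts
chartB.setOption(optionB);

// scope event handlers to the instance they belong to
chartA.on('click', (params) => {
  chartA.setOption(buildDrillDownOption(params)); // hypothetical helper
});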
What you're describing is called "impersonation". Keycloak does support impersonation, as discussed in Keycloak's documentation. Just recognize that you'll want to make sure you limit which clients can impersonate users, as this is a very security-sensitive operation.
-- User Table
CREATE TABLE User (
    user_id INT AUTO_INCREMENT PRIMARY KEY,
    username VARCHAR(50) NOT NULL,
    email VARCHAR(100) UNIQUE NOT NULL,
    password VARCHAR(100) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Meal Table
CREATE TABLE Meal (
    meal_id INT AUTO_INCREMENT PRIMARY KEY,
    user_id INT,
    meal_name VARCHAR(50) NOT NULL,
    meal_date DATE NOT NULL,
    FOREIGN KEY (user_id) REFERENCES User(user_id) ON DELETE CASCADE
);

-- FoodItem Table
CREATE TABLE FoodItem (
    food_item_id INT AUTO_INCREMENT PRIMARY KEY,
    food_name VARCHAR(50) NOT NULL,
    calories_per_100g DECIMAL(5,2),
    protein_per_100g DECIMAL(5,2),
    carbs_per_100g DECIMAL(5,2),
    fats_per_100g DECIMAL(5,2)
);

-- Nutrient Table (stores details of food items in each meal)
CREATE TABLE Nutrient (
    nutrient_id INT AUTO_INCREMENT PRIMARY KEY,
    meal_id INT,
    food_item_id INT,
    quantity_in_grams DECIMAL(5,2) NOT NULL, -- quantity of food item in the meal
    FOREIGN KEY (meal_id) REFERENCES Meal(meal_id) ON DELETE CASCADE,
    FOREIGN KEY (food_item_id) REFERENCES FoodItem(food_item_id) ON DELETE CASCADE
);
project/
├── main.py
└── mymodule/
    ├── __init__.py
    └── mypackage.py
Can you try adding a commit every 10k rows or so?
Try this.
for(int i = 0; i < Preferences.length; i++) {
for(int j = 0; j < Preferences[i].length; j++) {
System.out.println(Preferences[i][j]);
}
}
I just want to update that I fixed the problem. It was a terribly dumb mistake: I was debugging and testing this locally (localhost), but my redirect_uri in GCP was pointing to my production domain, and that was why the flow broke and I was getting null.
Thanks Doug Stevenson for helping bring some clarity to my thought process too. Appreciate it!